query (stringlengths 9 - 9.05k) | document (stringlengths 10 - 222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4 - 10) | document_rank (stringclasses 2 values)
---|---|---|---|---|---|---
Set up node resource group for the ManagedCluster object.
|
def set_up_node_resource_group(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
mc.node_resource_group = self.context.get_node_resource_group()
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def _init_cluster(self):\n self._Init_Cluster()",
"def setup(self, cluster):\n raise NotImplementedError()",
"def __init__(__self__,\n resource_name: str,\n args: ManagedNetworkGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data",
"def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)",
"def _initialize(self):\n try:\n self._azure_client.get_resource_group(self.resource_group)\n\n except CloudError as cloud_error:\n if cloud_error.error.error == \"ResourceGroupNotFound\":\n resource_group_params = {'location': self.region_name}\n try:\n self._azure_client.\\\n create_resource_group(self.resource_group,\n resource_group_params)\n except CloudError as cloud_error2: # pragma: no cover\n if cloud_error2.error.error == \"AuthorizationFailed\":\n mess = 'The following error was returned by Azure:\\n' \\\n '%s\\n\\nThis is likely because the Role' \\\n 'associated with the given credentials does ' \\\n 'not allow for Resource Group creation.\\nA ' \\\n 'Resource Group is necessary to manage ' \\\n 'resources in Azure. You must either ' \\\n 'provide an existing Resource Group as part ' \\\n 'of the configuration, or elevate the ' \\\n 'associated role.\\nFor more information on ' \\\n 'roles, see: https://docs.microsoft.com/' \\\n 'en-us/azure/role-based-access-control/' \\\n 'overview\\n' % cloud_error2\n raise ProviderConnectionException(mess)\n else:\n raise cloud_error2\n\n else:\n raise cloud_error",
"def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()",
"def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")",
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center",
"def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)",
"def test_create_cluster_network(self):\n pass",
"def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)",
"def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?",
"def test_create_cluster_role(self):\n pass",
"def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")",
"def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. Cluster: {} created.\".format(self.name_hyphenated))",
"def cluster_setup(nodes, pcsclustername=\"pcscluster\", extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"setup\"]\n\n if __use_new_commands():\n cmd += [pcsclustername]\n else:\n cmd += [\"--name\", pcsclustername]\n\n cmd += nodes\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster setup: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)",
"def create_cluster(self, provision_details, project_id=\"\"):\n response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'\n ,body=provision_details)\n return response",
"def init_cluster(\n self,\n node_name,\n ):\n # Gets the node IP address.\n ip = self.get_node_ip(node_name)\n\n # Initializes the swarm.\n docker_utils.swarm_init(\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(node_name),\n ssh_private_key_file=self.get_ssh_private_key_file(node_name),\n executor=node_name,\n logger=self._logger,\n )",
"def test_create_resource_group(self):\n pass",
"def set_up(self):\n \n print '\\n', '=' * 20, \"SETUP HOST %s\" % (self.address,), '=' * 20\n \n # Rsync. Copy process_manager.py\n path = 'cluster_test_%d' % self.address[1]\n \n pm_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'base_remote_resources'))\n \n src = [pm_path]\n \n # To working dir in remote machine.\n dest = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n if not self._rsync(src, dest):\n # Rsync fails.\n print self.address, \"encounters problems when synchronizing files.\"\n return\n \n print \"Syncing local resources...\"\n \n all_syncs = []\n if \"\" in console_config._local_resource_syncs:\n all_syncs.extend(console_config._local_resource_syncs[\"\"])\n \n if self.address[0] in console_config._local_resource_syncs:\n all_syncs.extend(console_config._local_resource_syncs[self.address[0]])\n \n for local_path, rel_path in all_syncs:\n local_path = os.path.abspath(local_path)\n if not self._rsync(local_path, \"%s%s%s\" % (dest, os.sep, rel_path)):\n print self.address, \"encounters problems when syncing local files.\"\n return\n \n print \"Sync'd.\"\n \n print \"Attaching downloadable resources...\"\n \n all_dls = []\n if \"\" in console_config._remote_resource_downloads:\n all_dls.extend(console_config._remote_resource_downloads[\"\"])\n \n if self.address[0] in console_config._remote_resource_downloads:\n all_dls.extend(console_config._remote_resource_downloads[self.address[0]])\n \n for url, rel_path in all_dls:\n if not self._download(url, rel_path):\n print self.address, \"encounters problems when downloading files.\"\n return\n \n print \"Attached.\"\n \n print \"Running setup scripts...\"\n \n if self.address[0] in console_config._setup_scripts:\n \n setup_scripts = console_config._setup_scripts[self.address[0]]\n \n temp_dir = tempfile.mkdtemp();\n \n for i, snippet in enumerate(setup_scripts):\n \n name, snippet, lang, isFile, shouldRun = snippet\n \n filename = os.path.join(temp_dir, name)\n \n if not isFile :\n with open(filename, 'w') as file:\n file.write(snippet) \n else:\n shutil.copyfile(snippet, filename)\n \n remote_snippet_dir = dest + \"/base_remote_resources/scripts\"\n \n if not self._rsync(temp_dir + os.sep, remote_snippet_dir):\n print self.address, \"encounters problems when uploading snippets.\"\n return\n \n shutil.rmtree(temp_dir)\n \n # Run snippets\n for i, snippet in enumerate(setup_scripts):\n \n name, snippet, lang, isFile, shouldRun = snippet\n if not shouldRun: continue\n \n remote_filename = \"base_remote_resources/scripts/%s\" % name\n \n run_cmd = \". ./%s\" % remote_filename\n \n if not self._ssh(run_cmd, use_tty=True):\n print self.address, \"encounters problems when executing snippets.\"\n return\n \n \n # Use SSH to run process manager.\n # nohup python process_manager.py >process_manager.out 2>&1\n # </dev/null &\n ssh_command = 'cd base_remote_resources; bash restart_process_manager.sh %d' % self.address[1]\n \n # print ssh_command\n \n if self._ssh(ssh_command):\n print self.address, \"process manager has been set up.\"\n else:\n print self.address, \"encounters problems when running SSH.\"\n return\n \n print \"Done.\"",
"def test_create_config_nodes(self):\n with self.override_role():\n self._create_config_node()",
"def test_crud_cluster_node(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster node\n response = self._create_cluster_node(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n node_id = self._list_cluster_node(cluster_id)\n\n # check it exists\n node_id = self._check_cluster_node_exists(cluster_id, node_id)\n\n # delete the object\n response = self._delete_cluster_node(cluster_id, node_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_cluster_nodes_exist(cluster_id)",
"def _set_group_resource(self, _g):\n\n if isinstance(_g, Server):\n return\n\n for _, sg in _g.subgroups.items():\n self._set_group_resource(sg)\n _g.vCPUs += sg.vCPUs\n _g.mem += sg.mem\n _g.local_volume_size += sg.local_volume_size",
"def config(self, cluster_name, name, username, version, int_netmask, int_ip_low,\n int_ip_high, ext_netmask, ext_ip_low, ext_ip_high, gateway, dns_servers,\n encoding, sc_zonename, smartconnect_ip, join_cluster, compliance, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n nodes = vmware.show_onefs(username)\n node = nodes.get(name, None)\n if not node:\n error = \"No node named {} found\".format(name)\n resp['error'] = error\n logger.error(error)\n return resp\n elif node['meta']['configured']:\n error = \"Cannot configure a node that's already configured\"\n resp['error'] = error\n logger.error(error)\n else:\n # Lets set it up!\n logger.info('Found node')\n console_url = node['console']\n if join_cluster:\n logger.info('Joining node to cluster {}'.format(cluster_name))\n setup_onefs.join_existing_cluster(console_url, cluster_name, compliance, logger)\n else:\n logger.info('Setting up new cluster named {}'.format(cluster_name))\n setup_onefs.configure_new_cluster(version=version,\n console_url=console_url,\n cluster_name=cluster_name,\n int_netmask=int_netmask,\n int_ip_low=int_ip_low,\n int_ip_high=int_ip_high,\n ext_netmask=ext_netmask,\n ext_ip_low=ext_ip_low,\n ext_ip_high=ext_ip_high,\n gateway=gateway,\n dns_servers=dns_servers,\n encoding=encoding,\n sc_zonename=sc_zonename,\n smartconnect_ip=smartconnect_ip,\n compliance=compliance,\n logger=logger)\n node['meta']['configured'] = True\n vmware.update_meta(username, name, node['meta'])\n logger.info('Task complete')\n return resp",
"def setup(self, *args, **kwargs):\n conf_file = os.environ.get(\"VCLUSTER_INFO\")\n if not conf_file:\n raise Exception(\n \"Environment variable VCLUSTER_INFO \"\n + \"not set to vcluster output configuration file\"\n )\n self.vcluster = vcluster.vcluster_from_conf(conf_file)\n\n self.component_name_map.update(\n {\n components.MesosMaster().name: \"mesos-master\",\n components.MesosAgent().name: \"mesos-slave\",\n components.Zookeeper().name: \"zookeeper\",\n components.HostMgr().name: \"hostmgr\",\n components.JobMgr().name: \"jobmgr\",\n components.ResMgr().name: \"resmgr\",\n components.BatchPlacementEngine().name: \"placement\",\n components.StatelessPlacementEngine().name: \"placement_stateless\",\n }\n )",
"def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance",
"def __init__(__self__,\n resource_name: str,\n args: ClusterArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..."
] |
[
"0.6686903",
"0.6289803",
"0.61468226",
"0.6087124",
"0.60325193",
"0.5909405",
"0.582536",
"0.5820801",
"0.57012415",
"0.5641262",
"0.56390566",
"0.5630276",
"0.5610059",
"0.55397946",
"0.55355084",
"0.5534161",
"0.552077",
"0.55206263",
"0.55193686",
"0.55183315",
"0.551642",
"0.54991406",
"0.54979825",
"0.54737407",
"0.5466344",
"0.545169",
"0.5448263",
"0.54298747",
"0.541674",
"0.540861"
] |
0.78996533
|
0
|
Set up supportPlan for the ManagedCluster object.
|
def set_up_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
support_plan = self.context.get_k8s_support_plan()
if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:
if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():
raise AzCLIError("Long term support is only available for premium tier clusters.")
mc.support_plan = support_plan
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setup(self, cluster):\n raise NotImplementedError()",
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def update_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc",
"def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")",
"def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return",
"def _init_cluster(self):\n self._Init_Cluster()",
"def setup(self, *args, **kwargs):\n conf_file = os.environ.get(\"VCLUSTER_INFO\")\n if not conf_file:\n raise Exception(\n \"Environment variable VCLUSTER_INFO \"\n + \"not set to vcluster output configuration file\"\n )\n self.vcluster = vcluster.vcluster_from_conf(conf_file)\n\n self.component_name_map.update(\n {\n components.MesosMaster().name: \"mesos-master\",\n components.MesosAgent().name: \"mesos-slave\",\n components.Zookeeper().name: \"zookeeper\",\n components.HostMgr().name: \"hostmgr\",\n components.JobMgr().name: \"jobmgr\",\n components.ResMgr().name: \"resmgr\",\n components.BatchPlacementEngine().name: \"placement\",\n components.StatelessPlacementEngine().name: \"placement_stateless\",\n }\n )",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def setup(self):\n if self.config.resume:\n if self._try_load_plan():\n Run().print_main(f\"#> Loaded plan from {self.plan_path}:\")\n Run().print_main(f\"#> num_chunks = {self.num_chunks}\")\n Run().print_main(f\"#> num_partitions = {self.num_chunks}\")\n Run().print_main(f\"#> num_embeddings_est = {self.num_embeddings_est}\")\n Run().print_main(f\"#> avg_doclen_est = {self.avg_doclen_est}\")\n return\n\n self.num_chunks = int(np.ceil(len(self.collection) / self.collection.get_chunksize()))\n\n # Saves sampled passages and embeddings for training k-means centroids later\n sampled_pids = self._sample_pids()\n avg_doclen_est = self._sample_embeddings(sampled_pids)\n\n # Select the number of partitions\n num_passages = len(self.collection)\n self.num_embeddings_est = num_passages * avg_doclen_est\n self.num_partitions = int(2 ** np.floor(np.log2(16 * np.sqrt(self.num_embeddings_est))))\n\n Run().print_main(f\"Creaing {self.num_partitions:,} partitions.\")\n Run().print_main(f\"*Estimated* {int(self.num_embeddings_est):,} embeddings.\")\n\n self._save_plan()",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def get_k8s_support_plan(self) -> Union[str, None]:\n # default to None\n support_plan = None\n # try to read the property value corresponding to the parameter from the `mc` object\n if self.mc and hasattr(self.mc, \"support_plan\") and self.mc.support_plan is not None:\n support_plan = self.mc.support_plan\n\n # if specified by customer, use the specified value\n support_plan = self.raw_param.get(\"k8s_support_plan\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return support_plan",
"def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?",
"def test_create_hyperflex_cluster_profile(self):\n pass",
"def config(self, cluster_name, name, username, version, int_netmask, int_ip_low,\n int_ip_high, ext_netmask, ext_ip_low, ext_ip_high, gateway, dns_servers,\n encoding, sc_zonename, smartconnect_ip, join_cluster, compliance, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n nodes = vmware.show_onefs(username)\n node = nodes.get(name, None)\n if not node:\n error = \"No node named {} found\".format(name)\n resp['error'] = error\n logger.error(error)\n return resp\n elif node['meta']['configured']:\n error = \"Cannot configure a node that's already configured\"\n resp['error'] = error\n logger.error(error)\n else:\n # Lets set it up!\n logger.info('Found node')\n console_url = node['console']\n if join_cluster:\n logger.info('Joining node to cluster {}'.format(cluster_name))\n setup_onefs.join_existing_cluster(console_url, cluster_name, compliance, logger)\n else:\n logger.info('Setting up new cluster named {}'.format(cluster_name))\n setup_onefs.configure_new_cluster(version=version,\n console_url=console_url,\n cluster_name=cluster_name,\n int_netmask=int_netmask,\n int_ip_low=int_ip_low,\n int_ip_high=int_ip_high,\n ext_netmask=ext_netmask,\n ext_ip_low=ext_ip_low,\n ext_ip_high=ext_ip_high,\n gateway=gateway,\n dns_servers=dns_servers,\n encoding=encoding,\n sc_zonename=sc_zonename,\n smartconnect_ip=smartconnect_ip,\n compliance=compliance,\n logger=logger)\n node['meta']['configured'] = True\n vmware.update_meta(username, name, node['meta'])\n logger.info('Task complete')\n return resp",
"def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance",
"def __init__(self, cluster_name: str, backend: 'backends.Backend',\n task: 'task_lib.Task', retry_until_up: bool) -> None:\n assert isinstance(backend, backends.CloudVmRayBackend), (\n 'Only CloudVMRayBackend is supported.')\n self.dag = sky.Dag()\n self.dag.add(task)\n self.cluster_name = cluster_name\n self.backend = backend\n self.retry_until_up = retry_until_up",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def __init__(__self__, *,\n name: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]] = None,\n tier: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)",
"def test_create_hyperflex_cluster_network_policy(self):\n pass",
"def test_allocate_virtualization_realm(self):\n pass",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def cluster_setup(nodes, pcsclustername=\"pcscluster\", extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"setup\"]\n\n if __use_new_commands():\n cmd += [pcsclustername]\n else:\n cmd += [\"--name\", pcsclustername]\n\n cmd += nodes\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster setup: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)",
"def test_create_cluster_role(self):\n pass",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")",
"def kops_provision(self):\n logger.info('Start provisioning the cluster {}'.format(\n self.config['cluster_name']))\n\n command = 'kops create cluster --cloud {provider} --name {cluster_name} --project {project} --zones $ZONES --master-zones $ZONES --node-count {worker_node_count} --image {os_image} --yes'.format(\n cluster_name=self.config['cluster_name'], provider=self.provider, project=self.config['project_id'], worker_node_count=self.config['worker_node_count'], os_image='ubuntu-os-cloud/ubuntu-1604-xenial-v20170202')\n\n logger.info(\n 'executed command to provision the cluster {}'.format(command))\n\n result = subprocess.run(command, stdout=PIPE, stderr=PIPE,\n\n universal_newlines=True, shell=True, env=self.env)\n\n if result.returncode == 0:\n logger.info(result.stdout)\n return True, None\n else:\n msg = 'Cannot provision the cluster: {id}/{cluster_name} from provider {provider} using KOPS'.format(\n id=self.config['id'], cluster_name=self.config['cluster_name'], provider=self.provider)\n logger.info(msg)\n\n if result.stderr:\n logger.info(result.stderr)\n\n return False, msg\n\n return True, ''",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )"
] |
[
"0.67845285",
"0.6472733",
"0.6166948",
"0.5738119",
"0.5612151",
"0.55861145",
"0.55009985",
"0.5440655",
"0.53569967",
"0.52553874",
"0.52533066",
"0.5251024",
"0.5222459",
"0.52190816",
"0.52048093",
"0.5204209",
"0.5203179",
"0.5169113",
"0.51444334",
"0.5130886",
"0.50899684",
"0.50763553",
"0.5063752",
"0.50523794",
"0.50502205",
"0.5031503",
"0.5025324",
"0.50252545",
"0.50097626",
"0.50043154"
] |
0.7180916
|
0
|
Set up azure monitor profile for the ManagedCluster object.
|
def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
# read the original value passed by the command
ksm_metric_labels_allow_list = self.context.raw_param.get("ksm_metric_labels_allow_list")
ksm_metric_annotations_allow_list = self.context.raw_param.get("ksm_metric_annotations_allow_list")
if ksm_metric_labels_allow_list is None:
ksm_metric_labels_allow_list = ""
if ksm_metric_annotations_allow_list is None:
ksm_metric_annotations_allow_list = ""
if self.context.get_enable_azure_monitor_metrics():
if mc.azure_monitor_profile is None:
mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()
mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)
mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long
metric_labels_allowlist=str(ksm_metric_labels_allow_list),
metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))
# set intermediate
self.context.set_intermediate("azuremonitormetrics_addon_enabled", True, overwrite_exists=True)
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def __init__(__self__, *,\n metrics: Optional[pulumi.Input['ManagedClusterAzureMonitorProfileMetricsArgs']] = None):\n if metrics is not None:\n pulumi.set(__self__, \"metrics\", metrics)",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )",
"def test_create_hyperflex_cluster_profile(self):\n pass",
"def __init__(__self__, *,\n enabled: pulumi.Input[bool],\n kube_state_metrics: Optional[pulumi.Input['ManagedClusterAzureMonitorProfileKubeStateMetricsArgs']] = None):\n pulumi.set(__self__, \"enabled\", enabled)\n if kube_state_metrics is not None:\n pulumi.set(__self__, \"kube_state_metrics\", kube_state_metrics)",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def set_up_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n (\n windows_admin_username,\n windows_admin_password,\n ) = self.context.get_windows_admin_username_and_password()\n if windows_admin_username or windows_admin_password:\n # license\n windows_license_type = None\n if self.context.get_enable_ahub():\n windows_license_type = \"Windows_Server\"\n\n # gmsa\n gmsa_profile = None\n if self.context.get_enable_windows_gmsa():\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n\n # this would throw an error if windows_admin_username is empty (the user enters an empty\n # string after being prompted), since admin_username is a required parameter\n windows_profile = self.models.ManagedClusterWindowsProfile(\n # [SuppressMessage(\"Microsoft.Security\", \"CS002:SecretInNextLine\", Justification=\"variable name\")]\n admin_username=windows_admin_username,\n admin_password=windows_admin_password,\n license_type=windows_license_type,\n gmsa_profile=gmsa_profile,\n )\n\n mc.windows_profile = windows_profile\n return mc",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()\n if not no_ssh_key:\n ssh_config = self.models.ContainerServiceSshConfiguration(\n public_keys=[\n self.models.ContainerServiceSshPublicKey(\n key_data=ssh_key_value\n )\n ]\n )\n linux_profile = self.models.ContainerServiceLinuxProfile(\n admin_username=self.context.get_admin_username(), ssh=ssh_config\n )\n mc.linux_profile = linux_profile\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def construct_mc_profile_default(self, bypass_restore_defaults: bool = False) -> ManagedCluster:\n # initialize the ManagedCluster object\n mc = self.init_mc()\n # DO NOT MOVE: remove defaults\n self._remove_defaults_in_mc(mc)\n\n # set up agentpool profile\n mc = self.set_up_agentpool_profile(mc)\n # set up misc direct mc properties\n mc = self.set_up_mc_properties(mc)\n # set up linux profile (for ssh access)\n mc = self.set_up_linux_profile(mc)\n # set up windows profile\n mc = self.set_up_windows_profile(mc)\n # set up service principal profile\n mc = self.set_up_service_principal_profile(mc)\n # add role assignment for vent subnet\n self.process_add_role_assignment_for_vnet_subnet(mc)\n # attach acr (add role assignment for acr)\n self.process_attach_acr(mc)\n # set up network profile\n mc = self.set_up_network_profile(mc)\n # set up addon profiles\n mc = self.set_up_addon_profiles(mc)\n # set up aad profile\n mc = self.set_up_aad_profile(mc)\n # set up oidc issuer profile\n mc = self.set_up_oidc_issuer_profile(mc)\n # set up api server access profile and fqdn subdomain\n mc = self.set_up_api_server_access_profile(mc)\n # set up identity\n mc = self.set_up_identity(mc)\n # set up identity profile\n mc = self.set_up_identity_profile(mc)\n # set up auto upgrade profile\n mc = self.set_up_auto_upgrade_profile(mc)\n # set up auto scaler profile\n mc = self.set_up_auto_scaler_profile(mc)\n # set up sku\n mc = self.set_up_sku(mc)\n # set up extended location\n mc = self.set_up_extended_location(mc)\n # set up node resource group\n mc = self.set_up_node_resource_group(mc)\n # set up defender\n mc = self.set_up_defender(mc)\n # set up workload identity profile\n mc = self.set_up_workload_identity_profile(mc)\n # set up storage profile\n mc = self.set_up_storage_profile(mc)\n # set up azure keyvalut kms\n mc = self.set_up_azure_keyvault_kms(mc)\n # set up image cleaner\n mc = self.set_up_image_cleaner(mc)\n # set up http proxy config\n mc = self.set_up_http_proxy_config(mc)\n # set up workload autoscaler profile\n mc = self.set_up_workload_auto_scaler_profile(mc)\n\n # setup k8s support plan\n mc = self.set_up_k8s_support_plan(mc)\n # set up azure monitor metrics profile\n mc = self.set_up_azure_monitor_profile(mc)\n # DO NOT MOVE: keep this at the bottom, restore defaults\n if not bypass_restore_defaults:\n mc = self._restore_defaults_in_mc(mc)\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME = addon_consts.get(\n \"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\"\n )\n CONST_KUBE_DASHBOARD_ADDON_NAME = addon_consts.get(\n \"CONST_KUBE_DASHBOARD_ADDON_NAME\"\n )\n CONST_AZURE_POLICY_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_POLICY_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_OPEN_SERVICE_MESH_ADDON_NAME = addon_consts.get(\n \"CONST_OPEN_SERVICE_MESH_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n addon_profiles = {}\n # error out if any unrecognized or duplicate addon provided\n # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is\n # error out if '--enable-addons=virtual-node' is set but aci_subnet_name and vnet_subnet_id are not\n addons = self.context.get_enable_addons()\n if \"http_application_routing\" in addons:\n addon_profiles[\n CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\n ] = self.build_http_application_routing_addon_profile()\n if \"kube-dashboard\" in addons:\n addon_profiles[\n CONST_KUBE_DASHBOARD_ADDON_NAME\n ] = self.build_kube_dashboard_addon_profile()\n if \"monitoring\" in addons:\n addon_profiles[\n CONST_MONITORING_ADDON_NAME\n ] = self.build_monitoring_addon_profile()\n if \"azure-policy\" in addons:\n addon_profiles[\n CONST_AZURE_POLICY_ADDON_NAME\n ] = self.build_azure_policy_addon_profile()\n if \"virtual-node\" in addons:\n # TODO: how about aciConnectorwindows, what is its addon name?\n os_type = self.context.get_virtual_node_addon_os_type()\n addon_profiles[\n CONST_VIRTUAL_NODE_ADDON_NAME + os_type\n ] = self.build_virtual_node_addon_profile()\n if \"ingress-appgw\" in addons:\n addon_profiles[\n CONST_INGRESS_APPGW_ADDON_NAME\n ] = self.build_ingress_appgw_addon_profile()\n if \"confcom\" in addons:\n addon_profiles[\n CONST_CONFCOM_ADDON_NAME\n ] = self.build_confcom_addon_profile()\n if \"open-service-mesh\" in addons:\n addon_profiles[\n CONST_OPEN_SERVICE_MESH_ADDON_NAME\n ] = self.build_open_service_mesh_addon_profile()\n if \"azure-keyvault-secrets-provider\" in addons:\n addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = self.build_azure_keyvault_secrets_provider_addon_profile()\n mc.addon_profiles = addon_profiles\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(enabled=True)\n else:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n return mc",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_mc_properties(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.tags = self.context.get_tags()\n mc.kubernetes_version = self.context.get_kubernetes_version()\n mc.dns_prefix = self.context.get_dns_name_prefix()\n mc.disk_encryption_set_id = self.context.get_node_osdisk_diskencryptionset_id()\n mc.disable_local_accounts = self.context.get_disable_local_accounts()\n mc.enable_rbac = not self.context.get_disable_rbac()\n return mc",
"def metrics(self) -> Optional[pulumi.Input['ManagedClusterAzureMonitorProfileMetricsArgs']]:\n return pulumi.get(self, \"metrics\")",
"def start_monitor(self, collector):\n pass",
"def setup(self, cluster):\n raise NotImplementedError()"
] |
[
"0.72846806",
"0.71346223",
"0.64711446",
"0.6104006",
"0.60403425",
"0.59842443",
"0.5969809",
"0.59013",
"0.5852365",
"0.5829357",
"0.5784714",
"0.5775937",
"0.5744401",
"0.570182",
"0.57013726",
"0.5676068",
"0.56605566",
"0.5610358",
"0.5585451",
"0.5560262",
"0.55012053",
"0.5429743",
"0.54186887",
"0.53935385",
"0.53758067",
"0.5358409",
"0.531207",
"0.5299413",
"0.5287958",
"0.5280591"
] |
0.7944016
|
0
|
The overall controller used to construct the default ManagedCluster profile. The completely constructed ManagedCluster object will later be passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
|
def construct_mc_profile_default(self, bypass_restore_defaults: bool = False) -> ManagedCluster:
# initialize the ManagedCluster object
mc = self.init_mc()
# DO NOT MOVE: remove defaults
self._remove_defaults_in_mc(mc)
# set up agentpool profile
mc = self.set_up_agentpool_profile(mc)
# set up misc direct mc properties
mc = self.set_up_mc_properties(mc)
# set up linux profile (for ssh access)
mc = self.set_up_linux_profile(mc)
# set up windows profile
mc = self.set_up_windows_profile(mc)
# set up service principal profile
mc = self.set_up_service_principal_profile(mc)
        # add role assignment for vnet subnet
self.process_add_role_assignment_for_vnet_subnet(mc)
# attach acr (add role assignment for acr)
self.process_attach_acr(mc)
# set up network profile
mc = self.set_up_network_profile(mc)
# set up addon profiles
mc = self.set_up_addon_profiles(mc)
# set up aad profile
mc = self.set_up_aad_profile(mc)
# set up oidc issuer profile
mc = self.set_up_oidc_issuer_profile(mc)
# set up api server access profile and fqdn subdomain
mc = self.set_up_api_server_access_profile(mc)
# set up identity
mc = self.set_up_identity(mc)
# set up identity profile
mc = self.set_up_identity_profile(mc)
# set up auto upgrade profile
mc = self.set_up_auto_upgrade_profile(mc)
# set up auto scaler profile
mc = self.set_up_auto_scaler_profile(mc)
# set up sku
mc = self.set_up_sku(mc)
# set up extended location
mc = self.set_up_extended_location(mc)
# set up node resource group
mc = self.set_up_node_resource_group(mc)
# set up defender
mc = self.set_up_defender(mc)
# set up workload identity profile
mc = self.set_up_workload_identity_profile(mc)
# set up storage profile
mc = self.set_up_storage_profile(mc)
        # set up azure keyvault kms
mc = self.set_up_azure_keyvault_kms(mc)
# set up image cleaner
mc = self.set_up_image_cleaner(mc)
# set up http proxy config
mc = self.set_up_http_proxy_config(mc)
# set up workload autoscaler profile
mc = self.set_up_workload_auto_scaler_profile(mc)
        # set up k8s support plan
mc = self.set_up_k8s_support_plan(mc)
# set up azure monitor metrics profile
mc = self.set_up_azure_monitor_profile(mc)
# DO NOT MOVE: keep this at the bottom, restore defaults
if not bypass_restore_defaults:
mc = self._restore_defaults_in_mc(mc)
return mc
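
For orientation, here is a minimal sketch of the hand-off step the description above alludes to: once assembled, the ManagedCluster is sent to the service through the SDK client attached to the decorator. The method name create_mc and the exact call pattern below are illustrative assumptions, not a verbatim copy of the CLI implementation.

from azure.cli.core.util import sdk_no_wait

def create_mc(self, mc: ManagedCluster) -> ManagedCluster:
    # hand the fully constructed profile to the Container Service SDK;
    # sdk_no_wait skips polling when the caller asked not to wait
    return sdk_no_wait(
        self.context.get_no_wait(),
        self.client.begin_create_or_update,
        resource_group_name=self.context.get_resource_group_name(),
        resource_name=self.context.get_name(),
        parameters=mc,
    )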
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def management_cluster(self) -> pulumi.Input['PrivateCloudManagementClusterArgs']:\n return pulumi.get(self, \"management_cluster\")",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?",
"def management_cluster(self) -> Optional[pulumi.Input['PrivateCloudManagementClusterArgs']]:\n return pulumi.get(self, \"management_cluster\")",
"def management_cluster(self) -> pulumi.Output['outputs.PrivateCloudManagementCluster']:\n return pulumi.get(self, \"management_cluster\")",
"def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")",
"def fusion_api_create_hypervisor_cluster_profile(self, body, api=None, headers=None):\n return self.cluster_profile.create(body=body, api=api, headers=headers)",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def _init_cluster(self):\n self._Init_Cluster()",
"def page1(self):\n result = request101.GET('/clusterinfo-web/controller')\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def Cluster(request, io_loop):\n\n def ClusterConstructor(**kwargs):\n log = logging.getLogger(__file__)\n log.setLevel(logging.DEBUG)\n log.handlers = [logging.StreamHandler(sys.stdout)]\n kwargs['log'] = log\n engine_launcher_class = kwargs.get(\"engine_launcher_class\")\n\n if (\n isinstance(engine_launcher_class, str)\n and \"MPI\" in engine_launcher_class\n and shutil.which(\"mpiexec\") is None\n ):\n pytest.skip(\"requires mpiexec\")\n\n cfg = kwargs.setdefault(\"config\", Config())\n cfg.EngineLauncher.engine_args = ['--log-level=10']\n cfg.ControllerLauncher.controller_args = ['--log-level=10']\n kwargs.setdefault(\"controller_args\", ['--ping=250'])\n\n c = cluster.Cluster(**kwargs)\n assert c.config is cfg\n request.addfinalizer(c.stop_cluster_sync)\n return c\n\n yield ClusterConstructor",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def _controller(self):\n # TODO: Probably better to use request_patron and check for\n # None here.\n patron = self.authenticated_patron_from_request()\n storage = CirculationPatronProfileStorage(patron, flask.url_for)\n return CoreProfileController(storage)",
"def build_kube_dashboard_addon_profile(self) -> ManagedClusterAddonProfile:\n kube_dashboard_addon_profile = self.models.ManagedClusterAddonProfile(\n enabled=True,\n )\n return kube_dashboard_addon_profile",
"def test_create_hyperflex_cluster_profile(self):\n pass",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. Cluster: {} created.\".format(self.name_hyphenated))",
"def run(job=None, logger=None, **kwargs):\n environment = Environment.objects.get(id=ENV_ID)\n\n # Save cluster data on the resource so teardown works later\n create_required_parameters()\n resource = kwargs['resource']\n resource.create_gke_k8s_cluster_env = environment.id\n resource.create_gke_k8s_cluster_name = CLUSTER_NAME\n resource.name = CLUSTER_NAME\n resource.save()\n\n job.set_progress('Connecting to GKE...')\n builder = GKEClusterBuilder(environment, CLUSTER_NAME)\n\n job.set_progress('Sending request for new cluster {}...'.format(CLUSTER_NAME))\n builder.create_cluster(NODE_COUNT)\n\n job.set_progress('Waiting up to {} seconds for provisioning to complete.'\n .format(TIMEOUT))\n start = time.time()\n job.set_progress('Waiting for cluster IP address...')\n endpoint = builder.wait_for_endpoint(timeout=TIMEOUT)\n if not endpoint:\n return (\"FAILURE\",\n \"No IP address returned after {} seconds\".format(TIMEOUT),\n \"\")\n\n remaining_time = TIMEOUT - (time.time() - start)\n job.set_progress('Waiting for nodes to report hostnames...')\n nodes = builder.wait_for_nodes(NODE_COUNT, timeout=remaining_time)\n if len(nodes) < NODE_COUNT:\n return (\"FAILURE\",\n \"Nodes are not ready after {} seconds\".format(TIMEOUT),\n \"\")\n\n job.set_progress('Importing cluster...')\n cluster = builder.get_cluster()\n tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')\n kubernetes = Kubernetes.objects.create(\n name=CLUSTER_NAME,\n ip=cluster['endpoint'],\n port=443,\n protocol='https',\n serviceaccount=cluster['masterAuth']['username'],\n servicepasswd=cluster['masterAuth']['password'],\n container_technology=tech,\n )\n resource.create_gke_k8s_cluster_id = kubernetes.id\n resource.save()\n url = 'https://{}{}'.format(\n PortalConfig.get_current_portal().domain,\n reverse('container_orchestrator_detail', args=[kubernetes.id])\n )\n job.set_progress(\"Cluster URL: {}\".format(url))\n\n job.set_progress('Importing nodes...')\n for node in nodes:\n # Generate libcloud UUID from GCE ID\n id_unicode = '{}:{}'.format(node['id'], 'gce')\n uuid = hashlib.sha1(id_unicode.encode('utf-8')).hexdigest()\n # Create a bbones server record. Other details like CPU and Mem Size\n # will be populated the next time the GCE handler is synced.\n Server.objects.create(\n hostname=node['name'],\n resource_handler_svr_id=uuid,\n environment=environment,\n resource_handler=environment.resource_handler,\n group=resource.group,\n owner=resource.owner,\n )\n\n job.set_progress('Waiting for cluster to report as running...')\n remaining_time = TIMEOUT - (time.time() - start)\n status = builder.wait_for_running_status(timeout=remaining_time)\n if status != 'RUNNING':\n return (\"FAILURE\",\n \"Status is {} after {} seconds (expected RUNNING)\".format(\n status, TIMEOUT),\n \"\")\n\n return (\"SUCCESS\",\n \"Cluster is ready and can be accessed at {}\".format(url),\n \"\")",
"def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))",
"def page11(self):\n self.token_target = \\\n 'Request'\n self.token_mode = \\\n 'Reset'\n result = request1101.GET('/clusterinfo-web/controller' +\n '?target=' +\n self.token_target +\n '&mode=' +\n self.token_mode)\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )"
] |
[
"0.6218793",
"0.5972921",
"0.58533275",
"0.58225167",
"0.5787789",
"0.57581836",
"0.5740016",
"0.5665097",
"0.5546167",
"0.5515622",
"0.5507955",
"0.5492365",
"0.54808474",
"0.5428319",
"0.5423355",
"0.5421532",
"0.5405033",
"0.5404923",
"0.5400361",
"0.5365038",
"0.534484",
"0.5287839",
"0.52851564",
"0.5274412",
"0.5255758",
"0.5250406",
"0.5223332",
"0.5205969",
"0.5201369",
"0.5185841"
] |
0.6419381
|
0
|
Helper function to check if postprocessing is required after sending a PUT request to create the cluster.
|
def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool:
        # some addons require post cluster creation role assignment
monitoring_addon_enabled = self.context.get_intermediate("monitoring_addon_enabled", default_value=False)
ingress_appgw_addon_enabled = self.context.get_intermediate("ingress_appgw_addon_enabled", default_value=False)
virtual_node_addon_enabled = self.context.get_intermediate("virtual_node_addon_enabled", default_value=False)
azuremonitormetrics_addon_enabled = self.context.get_intermediate(
"azuremonitormetrics_addon_enabled",
default_value=False
)
enable_managed_identity = self.context.get_enable_managed_identity()
attach_acr = self.context.get_attach_acr()
need_grant_vnet_permission_to_cluster_identity = self.context.get_intermediate(
"need_post_creation_vnet_permission_granting", default_value=False
)
if (
monitoring_addon_enabled or
ingress_appgw_addon_enabled or
virtual_node_addon_enabled or
azuremonitormetrics_addon_enabled or
(enable_managed_identity and attach_acr) or
need_grant_vnet_permission_to_cluster_identity
):
return True
return False
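
A hedged sketch of how this gate is typically consumed by the caller: create or update the cluster first, then run the post-creation work only when the check says it is needed. The wrapper name put_mc and the blocking .result() call are illustrative assumptions; postprocessing_after_mc_created is the companion routine shown among the samples below.

def put_mc(self, mc: ManagedCluster) -> ManagedCluster:
    # send the PUT request and block until the cluster object comes back
    cluster = self.client.begin_create_or_update(
        resource_group_name=self.context.get_resource_group_name(),
        resource_name=self.context.get_name(),
        parameters=mc,
    ).result()
    # role assignments, DCR associations, ACR attach, etc. only run when required
    if self.check_is_postprocessing_required(mc):
        self.postprocessing_after_mc_created(cluster)
    return cluster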
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool:\n # some addons require post cluster creation role assigment\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n enable_managed_identity = check_is_msi_cluster(mc)\n attach_acr = self.context.get_attach_acr()\n\n if (\n monitoring_addon_enabled or\n ingress_appgw_addon_enabled or\n virtual_node_addon_enabled or\n (enable_managed_identity and attach_acr)\n ):\n return True\n return False",
"def is_bootstrapped(self):\n\n # Attempt to bootstrap without providing any of the required fields, and inspect the exception\n try:\n response = self._connection.post(\"/deployment/new\", json={})\n raise TransportError(\"POST {} to /deployment/new should have raised an exception, but didn't\", response)\n except ValueError as e:\n if e.args[0] == 400:\n # The server is willing to accept correct field values to bootstrap with, so isn't bootstrapped yet.\n return False\n if e.args[0] == 403:\n # The server is no longer willing to accept POSTs to /deployment/new, because it's already bootstrapped.\n return True\n raise\n raise TransportError(response)",
"def is_put_or_post(split_request_header: list) -> bool:\n if split_request_header[0] == \"PUT\":\n return True\n elif split_request_header[0] == \"POST\":\n return True\n\n return False",
"def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False",
"def is_catastrophic(self):\n if (self.request.method.upper() == 'PUT'\n and 'PLURAL_PUT' not in self.http_methods) \\\n or (self.request.method.upper() == 'DELETE'\n and 'PLURAL_DELETE' not in self.http_methods):\n return True\n return False",
"def is_valid(self):\n return self.post_processor in PostProcessor.valid_list()",
"def ispostSupportedClass(cls, tmpcls, session=None):\n if session is not None:\n cls.getclsoptions(tmpcls, session)\n if 'POST' in optionsdict[tmpcls]['OPTIONS']:\n return True\n return False",
"def post(self):\n # by default post is not supported\n return False",
"def skip(self):\n if not self.helper_view.newDataOnly():\n return False\n\n if self.request.steps[-1].startswith(\"++add++\"):\n return False\n if self.request.method != \"PATCH\":\n # restapi calls\n return False\n return True",
"def post(self):\n self.not_supported()",
"def post_k8s_updating_tasks(post_tasks=None):\n if post_tasks:\n for task in post_tasks:\n if callable(task):\n task()\n LOG.debug('Running mandatory tasks after updating proccess has finished.')",
"def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')",
"def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], r.form['host_id'], r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST",
"def is_modify_required(self, obj_fs, cap_unit):\n try:\n to_update = {}\n obj_fs = obj_fs.update()\n description = self.module.params['description']\n\n if description is not None and description != obj_fs.description:\n to_update.update({'description': description})\n\n size = self.module.params['size']\n if size and cap_unit:\n size_byte = int(utils.get_size_bytes(size, cap_unit))\n if size_byte < obj_fs.size_total:\n self.module.fail_json(msg=\"Filesystem size can be \"\n \"expanded only\")\n elif size_byte > obj_fs.size_total:\n to_update.update({'size': size_byte})\n\n tiering_policy = self.module.params['tiering_policy']\n if tiering_policy and self.get_tiering_policy_enum(\n tiering_policy) != obj_fs.tiering_policy:\n to_update.update({'tiering_policy':\n self.get_tiering_policy_enum(\n tiering_policy)})\n\n is_thin = self.module.params['is_thin']\n if is_thin is not None and is_thin != obj_fs.is_thin_enabled:\n to_update.update({'is_thin': is_thin})\n\n data_reduction = self.module.params['data_reduction']\n if data_reduction is not None and \\\n data_reduction != obj_fs.is_data_reduction_enabled:\n to_update.update({'is_compression': data_reduction})\n\n access_policy = self.module.params['access_policy']\n if access_policy and self.get_access_policy_enum(\n access_policy) != obj_fs.access_policy:\n to_update.update({'access_policy':\n self.get_access_policy_enum(access_policy)})\n\n locking_policy = self.module.params['locking_policy']\n if locking_policy and self.get_locking_policy_enum(\n locking_policy) != obj_fs.locking_policy:\n to_update.update({'locking_policy':\n self.get_locking_policy_enum(\n locking_policy)})\n\n snap_sch = obj_fs.storage_resource.snap_schedule\n\n if self.snap_sch_id is not None:\n if self.snap_sch_id == \"\":\n if snap_sch and snap_sch.id != self.snap_sch_id:\n to_update.update({'is_snap_schedule_paused': False})\n elif snap_sch is None or snap_sch.id != self.snap_sch_id:\n to_update.update({'snap_sch_id': self.snap_sch_id})\n\n smb_properties = self.module.params['smb_properties']\n if smb_properties:\n sync_writes_enabled = \\\n smb_properties['is_smb_sync_writes_enabled']\n oplocks_enabled = \\\n smb_properties['is_smb_op_locks_enabled']\n notify_on_write = \\\n smb_properties['is_smb_notify_on_write_enabled']\n notify_on_access = \\\n smb_properties['is_smb_notify_on_access_enabled']\n notify_on_change_dir_depth = \\\n smb_properties['smb_notify_on_change_dir_depth']\n\n if sync_writes_enabled is not None and \\\n sync_writes_enabled != obj_fs.is_cifs_sync_writes_enabled:\n to_update.update(\n {'is_cifs_sync_writes_enabled': sync_writes_enabled})\n\n if oplocks_enabled is not None and \\\n oplocks_enabled != obj_fs.is_cifs_op_locks_enabled:\n to_update.update(\n {'is_cifs_op_locks_enabled': oplocks_enabled})\n\n if notify_on_write is not None and \\\n notify_on_write != \\\n obj_fs.is_cifs_notify_on_write_enabled:\n to_update.update(\n {'is_cifs_notify_on_write_enabled': notify_on_write})\n\n if notify_on_access is not None and \\\n notify_on_access != \\\n obj_fs.is_cifs_notify_on_access_enabled:\n to_update.update(\n {'is_cifs_notify_on_access_enabled':\n notify_on_access})\n\n if notify_on_change_dir_depth is not None and \\\n notify_on_change_dir_depth != \\\n obj_fs.cifs_notify_on_change_dir_depth:\n to_update.update(\n {'cifs_notify_on_change_dir_depth':\n notify_on_change_dir_depth})\n if len(to_update) > 0:\n return to_update\n else:\n return None\n\n except Exception as e:\n errormsg = \"Failed to determine if FileSystem id: {0}\" \\\n 
\" modification required with error {1}\".format(obj_fs.id,\n str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"def test_kyc_put_request_legal(self):\n pass",
"def _should_initialize_check_run(self, payload):\n action = payload.get('action')\n return action in self.initialize_actions or self.initialize_actions is None",
"def pre_k8s_updating_tasks(post_tasks, params_to_preserve):\n # pylint: disable-msg=broad-except\n rc = 0\n LOG.debug('Running mandatory tasks before update proccess start.')\n try:\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n except Exception as e:\n LOG.error('Loading kube_apiserver config [Detail %s].', e)\n return 1\n\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n advertise_address = m.group(1)\n LOG.debug(' advertise_address = %s', advertise_address)\n params_to_preserve['advertise-address'] = advertise_address\n\n def _post_task_update_advertise_address():\n \"\"\"This method will be executed in right after control plane has been initialized and it\n will update advertise_address in manifests/kube-apiserver.yaml to use mgmt address\n instead of oam address due to https://bugs.launchpad.net/starlingx/+bug/1900153\n \"\"\"\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)\n\n def _post_task_security_context():\n cmd = [\"sed\", \"-i\", \"/securityContext:/,/type: RuntimeDefault/d\", KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)\n\n post_tasks.append(_post_task_update_advertise_address)\n post_tasks.append(_post_task_security_context)\n\n return rc",
"def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()",
"def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True",
"def is_prepost(*args):\n return _ida_hexrays.is_prepost(*args)",
"def test_client_can_do_put_request(self):\n response = self.httpbin_4.test_requests_put_method()\n self.assertEqual(response.request.method, 'PUT')\n self.assertEqual(response.status_code, 200)",
"def check_condor(self):\n\n self.cluster=self.info['mw_run']['1']\n self.norm_with_cross=self.info['mw_run']['4']\n self.condor_req=self.info['mw_run']['11']\n\n #type is automaticaly updated now\n #self.cluster=int(condor)\n #if norm_with_cross==\"F\":\n # self.norm_with_cross=0\n #else:\n # self.norm_with_cross=1",
"def required():\n kernel = __salt__['grains.item']('os') # pylint: disable=E0602,E0603\n\n # Disable rebooting for HDP clusters until that works reliably\n hadoop_distro = __salt__['pillar.get']('hadoop.distro') # pylint: disable=E0602,E0603\n if hadoop_distro == 'HDP':\n return False\n\n if kernel['os'] == \"CentOS\" or kernel['os'] == \"RedHat\":\n try:\n current_version = __salt__['cmd.run']('uname -r') # pylint: disable=E0602,E0603\n latest_version = __salt__['cmd.run']('rpm -q --last kernel') # pylint: disable=E0602,E0603\n latest_version = latest_version.split(\" \")\n latest_version = [\n version for version in latest_version if 'kernel' in version]\n latest_version = str(latest_version[0]).strip('kernel-') # pylint: disable=E1310\n if current_version == latest_version:\n return False\n except: # pylint: disable=W0702\n return False\n return True\n\n return __salt__['file.file_exists']('/var/run/reboot-required') # pylint: disable=E0602,E0603",
"def prerequisites():\n if not 'T2_TOKEN' in os.environ:\n print(\"Error: Please supply T2_TOKEN as an environment variable.\")\n exit(1)\n if not 'T2_URL' in os.environ:\n print(\"Error: Please supply T2_URL as an environment variable.\")\n exit(1)\n if not os.path.isfile(\"/cluster.yaml\"):\n print(\"Error Please supply cluster definition as file in /cluster.yaml.\")\n exit(1)",
"def match_request(self, req):\n\n return req.method == 'POST' and req.path_info == '/bitbucketsync'",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def validate_on_put_request(self, data, **kwargs):\n if request.method == \"PUT\":\n if \"bio\" not in data or \"website\" not in data:\n raise ValidationError(\"Missing one or more fields.\")",
"def needs_update(self, context) -> bool:\n raise NotImplementedError",
"def _check_controller_state():\n chosts = pecan.request.dbapi.ihost_get_by_personality(\n constants.CONTROLLER)\n\n for chost in chosts:\n utils.is_host_state_valid_for_fs_resize(chost)\n\n return True"
] |
[
"0.65956247",
"0.6160552",
"0.5965819",
"0.5765838",
"0.54389656",
"0.52548474",
"0.5172896",
"0.51714987",
"0.5161744",
"0.5144229",
"0.50794834",
"0.50763243",
"0.50758445",
"0.5068631",
"0.506405",
"0.50525963",
"0.5025663",
"0.50229084",
"0.50158024",
"0.49857804",
"0.4980957",
"0.4976503",
"0.49752486",
"0.49740872",
"0.49688995",
"0.49546072",
"0.49546072",
"0.4947931",
"0.49326557",
"0.49307066"
] |
0.6787382
|
0
|
Initialize an AKSManagedClusterModels object to store the models.
|
def init_models(self) -> None:
self.models = AKSManagedClusterModels(self.cmd, self.resource_type)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()",
"def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)",
"def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]",
"def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1",
"def __init__(self,\n num_clusters,\n model_dir=None,\n initial_clusters=RANDOM_INIT,\n distance_metric=SQUARED_EUCLIDEAN_DISTANCE,\n random_seed=0,\n use_mini_batch=True,\n mini_batch_steps_per_iteration=1,\n kmeans_plus_plus_num_retries=2,\n relative_tolerance=None,\n config=None):\n params = {}\n params['num_clusters'] = num_clusters\n params['training_initial_clusters'] = initial_clusters\n params['distance_metric'] = distance_metric\n params['random_seed'] = random_seed\n params['use_mini_batch'] = use_mini_batch\n params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration\n params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries\n params['relative_tolerance'] = relative_tolerance\n super(KMeansClustering, self).__init__(\n model_fn=_kmeans_clustering_model_fn,\n params=params,\n model_dir=model_dir,\n config=config)",
"def __init__(self, model_names):\n for name in model_names:\n model = spacy.load(name)\n self.pool[name] = SharedModel(name, model)\n log.debug(\"Initialized shared models in pool\")",
"def _init_cluster(self):\n self._Init_Cluster()",
"def initialize_model(self):\n pass",
"def create_models(self):\n model_list = []\n for i in range(0, len(self.X.cluster.unique())):\n foo_model = self.model\n foo_model.set_params(**self.best_params_list[i])\n model_list.append(foo_model)\n return model_list",
"def __init__(self, seqrecords, threshold, consensus_threshold):\n # iterate through each cluster and find a representative\n self.clusters = list()\n self.threshold = threshold\n self.consensus_threshold = consensus_threshold\n for record in seqrecords:\n try:\n min_dist, clst = min((c.distance(record), c) for c in self.clusters)\n except ValueError:\n # If there aren't any clusters yet, we need to create a new one; this is a somewhat hackish\n # way of doihng so\n min_dist = threshold + 1\n if min_dist < threshold:\n clst.add(record)\n else:\n new_cluster = Cluster(record, consensus_threshold)\n self.clusters.append(new_cluster)",
"def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' + model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)",
"def init_model(self):\n pass",
"def reInitAndRun(self):\n self.playlists = self.readPlaylistData()\n self.audioDF = self.readAudioData(shouldProcess=True)\n self.clusterLabels = []\n self.models = Clusterers(k=len(self.playlists))\n self.processAndCluster()\n self.analyzeResults()",
"def models(self):\n umap_model = umap.UMAP(\n n_neighbors=15, n_components=self.n_umap, metric=\"cosine\"\n )\n umap_model = umap_model.fit(self.sentence_embeddings)\n umap_embeddings = umap_model.transform(self.sentence_embeddings)\n kmenoid_model = KMedoids(\n n_clusters=self.n_clusters, metric=\"cosine\", init=\"random\", random_state=15\n )\n cluster = kmenoid_model.fit(umap_embeddings)\n return cluster, umap_model",
"def _cluster_model_bundle(self, model, model_clust_thr, identifier=None):\n thresholds = [30, 20, 15, model_clust_thr]\n model_cluster_map = qbx_and_merge(model, thresholds,\n nb_pts=self.nb_points,\n rng=self.rng,\n verbose=False)\n self.model_centroids = model_cluster_map.centroids\n len_centroids = len(self.model_centroids)\n if len_centroids > 1000:\n logging.warning('Model {0} simplified at threshod '\n '{1}mm with {2} centroids'.format(identifier,\n str(model_clust_thr),\n str(len_centroids)))",
"def _init_lda(self):\n if False: #os.path.exists(self.MODEL_PATH):\n self.lda = gensim.models.ldamodel.LdaModel.load(self.MODEL_PATH)\n else:\n # chunksize determines the number of documents to be processed in a worker.\n self.lda = gensim.models.ldamodel.LdaModel(\n corpus=None, id2word=self.dictionary, num_topics=self.num_topics,\n update_every=self.update_every, chunksize=self.chunksize,\n passes=self.passes, distributed=self.distributed)",
"def initialize_model(self, initial_data):\n # EDIT THIS METHOD TO RETURN A MINIMAX MODEL ###\n return None",
"def loadmodels(self):\n for emane_model in EMANE_MODELS:\n logger.info(\"loading emane model: (%s) %s - %s\",\n emane_model, emane_model.name, RegisterTlvs(emane_model.config_type))\n self._modelclsmap[emane_model.name] = emane_model\n self.session.add_config_object(emane_model.name, emane_model.config_type,\n emane_model.configure_emane)",
"def train_clustermodel_sparse_long(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix_long(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse_long'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse_long'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_long_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse_long']]",
"def _init_s3_train_files(self):\n # XGBoost requires libsvm training and validation files when invoking fit()\n self._prepare_libsvm_data()\n\n if self.n_classes <= 2:\n self.hyperparameters['eval_metric'] = 'auc'\n self.hyperparameters['objective'] = 'binary:logistic'\n else:\n self.hyperparameters['objective'] = 'multi:softprob'\n self.hyperparameters['num_class'] = self.n_classes\n\n s3_input_training = sagemaker.s3_input(s3_data=self.s3_training_libsvm_path, content_type='libsvm')\n s3_input_validation = sagemaker.s3_input(s3_data=self.s3_validation_libsvm_path, content_type='libsvm')\n return s3_input_training, s3_input_validation",
"def loadmodels(): # type: () -> None\n\n global accsearch, unaccsearch, eulamodel\n\n accsearch = [row for row in helpers.accExamples if helpers.goodsize(row['Clause Text'])]\n accsearch = [addtoks(row) for row in accsearch]\n unaccsearch = [row for row in helpers.unaccExamples if helpers.goodsize(row['Clause Text'])]\n unaccsearch = [addtoks(row) for row in unaccsearch]\n modeldir = helpers.getmodelfolder()\n accargs = buildbertargs()\n accargs.output_dir = modeldir\n eulamodel = ClassificationModel('roberta', modeldir, args=accargs, weight=[2, 1], use_cuda=False)",
"def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def train_clustermodel_nonsparse(self):\n \n segtimes_df, nonsparse_matrix = self.create_nonsparse_matrix(self.data)\n segtimes_df['index']=segtimes_df.index\n nonsparse_matrix['index']=nonsparse_matrix.index\n data_to_scale = pd.merge(segtimes_df, nonsparse_matrix, on=['index'])\n data_scaled = self.scale_matrix(data_to_scale)\n data_to_cluster = data_scaled.drop(columns = ['segment_id','level_0','date','time'])\n \n print('Clustering using nonsparse segment/time matrix and: ' + self.algorithm)\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(data_to_cluster)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_nonsparse'])\n clusters_df['segtimekey'] = clusters_df.index\n segtimes_df['segtimekey'] = segtimes_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(segtimes_df, clusters_df, on=['segtimekey'])\n self.clusters_df_final['cluster_nonsparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_nonsparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','date','time','cluster_nonsparse']]",
"def load(self) -> None:\n # Load in centroids\n if (self._path_model / f\"{self}\").is_file():\n with open(self._path_model / str(self), 'r') as file:\n self._centroids = {k: np.asarray(v, dtype=np.float32) for k, v in json.load(file).items()}\n \n # Load in (validation) clusters\n if (self._path_data / f\"{self}-train\").is_file():\n with open(self._path_data / f\"{self}-train\", 'r') as file:\n self._clusters = json.load(file)\n if (self._path_data / f\"{self}-val\").is_file():\n with open(self._path_data / f\"{self}-val\", 'r') as file:\n self._clusters_val = json.load(file)",
"def set_train(self):\n for m in self.models.values():\n m.train()",
"def setup_models(self):\n pass"
] |
[
"0.6589613",
"0.6397897",
"0.6296286",
"0.60146695",
"0.5976117",
"0.59535027",
"0.5843019",
"0.5837605",
"0.582323",
"0.5809182",
"0.5631231",
"0.55995786",
"0.54818463",
"0.5473843",
"0.5443327",
"0.54403687",
"0.5425224",
"0.54122424",
"0.54051256",
"0.53920186",
"0.5373891",
"0.53398466",
"0.53383255",
"0.5333826",
"0.5330129",
"0.5327863",
"0.5312568",
"0.53096706",
"0.5308501",
"0.52897745"
] |
0.8961721
|
0
|
Initialize an AKSManagedClusterContext object to store the context in the process of assembling the ManagedCluster object.
|
def init_context(self) -> None:
self.context = AKSManagedClusterContext(
self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def _init_cluster(self):\n self._Init_Cluster()",
"def init_models(self) -> None:\n self.models = AKSManagedClusterModels(self.cmd, self.resource_type)",
"def init_models(self) -> None:\n self.models = AKSManagedClusterModels(self.cmd, self.resource_type)",
"def __init__(self,\n num_clusters,\n model_dir=None,\n initial_clusters=RANDOM_INIT,\n distance_metric=SQUARED_EUCLIDEAN_DISTANCE,\n random_seed=0,\n use_mini_batch=True,\n mini_batch_steps_per_iteration=1,\n kmeans_plus_plus_num_retries=2,\n relative_tolerance=None,\n config=None):\n params = {}\n params['num_clusters'] = num_clusters\n params['training_initial_clusters'] = initial_clusters\n params['distance_metric'] = distance_metric\n params['random_seed'] = random_seed\n params['use_mini_batch'] = use_mini_batch\n params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration\n params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries\n params['relative_tolerance'] = relative_tolerance\n super(KMeansClustering, self).__init__(\n model_fn=_kmeans_clustering_model_fn,\n params=params,\n model_dir=model_dir,\n config=config)",
"def make_cluster_context(cluster, show_deleted=False):\n context = RequestContext(user_name=cluster.trustee_username,\n password=cluster.trustee_password,\n trust_id=cluster.trust_id,\n show_deleted=show_deleted,\n user_domain_id=CONF.trust.trustee_domain_id,\n user_domain_name=CONF.trust.trustee_domain_name)\n return context",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }",
"def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1",
"def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)",
"def __init__(self, cluster_metadata: MasterURLIdentifier) -> None:\n self.cluster_metadata = cluster_metadata\n if self.cluster_metadata.region == 'global':\n # The global region is unsupported as it will be eventually deprecated.\n raise ValueError('Clusters in the global region are not supported.')\n elif not self.cluster_metadata.region:\n _LOGGER.warning(\n 'No region information was detected, defaulting Dataproc cluster '\n 'region to: us-central1.')\n self.cluster_metadata.region = 'us-central1'\n\n if not self.cluster_metadata.cluster_name:\n self.cluster_metadata.cluster_name = ie.current_env(\n ).clusters.default_cluster_name\n\n from google.cloud import dataproc_v1\n self._cluster_client = dataproc_v1.ClusterControllerClient(\n client_options={\n 'api_endpoint': \\\n f'{self.cluster_metadata.region}-dataproc.googleapis.com:443'\n })\n\n if self.cluster_metadata in ie.current_env().clusters.master_urls.inverse:\n self.master_url = ie.current_env().clusters.master_urls.inverse[\n self.cluster_metadata]\n else:\n self.master_url = None",
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center",
"def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager",
"def create(self):\n data = {\"language\": self.language, \"clusterId\": self.cluster_id}\n response = self.post(self.url, \"1.2\", \"contexts/create\", data=data, token=self.token)\n self.id = response.get(\"id\", None)\n if self.id is None:\n raise DatabricksApiException(403, 4, \"Context ID missing\")",
"def __init__(self, centroids = None, n_clusters = None, n_features = None, alpha=1.0, **kwargs):\n \n super(ClusteringLayer, self).__init__(**kwargs)\n self.alpha = alpha\n self.initial_centroids = centroids\n\n if centroids is not None:\n n_clusters, n_features = centroids.shape\n\n self.n_features, self.n_clusters = n_features, n_clusters\n\n assert self.n_clusters is not None\n assert self.n_features is not None",
"def __init__(self, cluster_name: str, zone: str, sa_credentials_file_path: str):\n self.cluster_name = cluster_name\n self._credentials, self.project_id = load_credentials_from_file(\n sa_credentials_file_path, scopes=[\"https://www.googleapis.com/auth/cloud-platform\"])\n self.zone = zone\n\n # Generate the GCP Cluster Manager Client.\n # See: https://googleapis.dev/python/container/latest/container_v1/cluster_manager.html\n self.client = ClusterManagerClient(credentials=self.credentials)",
"def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def create_k_context(self):\n if self.k == 0:\n self.contextdata = self.data\n\n if self.contextdata is None:\n print(\"Start creating k-context Parallel\")\n\n with mp.Pool(mp.cpu_count()) as p:\n result = p.map(self.create_k_context_trace, self.data.groupby([self.trace]))\n self.contextdata = pd.concat(result, ignore_index=True)",
"def __init__(\n self,\n clustering_algorithm,\n n_clusters: int,\n cluster_args: dict,\n checkpoints_path: str,\n batch_size: int = 1024,\n is_batched: bool = False):\n super().__init__()\n self.clustering_algorithm = clustering_algorithm\n self.n_clusters = n_clusters\n self.batch_size = batch_size\n self.cluster_args = cluster_args\n self.checkpoints_path = checkpoints_path\n self.is_batched = is_batched",
"def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.num_clusters\n assert p.attention_window\n assert not p.packed_input\n\n clustering_p = p.clustering\n clustering_p.num_clusters = p.num_clusters\n clustering_p.num_heads = p.num_heads\n clustering_p.dim_per_head = p.dim_per_head or p.hidden_dim // p.num_heads\n # We normalize manually prior so that we can reuse the same normalized\n # query/key to compute attention probs later.\n clustering_p.apply_layer_norm = False\n self.CreateChild('clustering', clustering_p)",
"def __init__(self, cluster, topic_metadata):\n self._name = topic_metadata.name\n self._cluster = cluster\n self._partitions = {}\n self.update(topic_metadata)",
"def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]",
"def init_with_context(self, context):\n pass",
"def __init__(self, cores=1, *args, **kwargs):\n print('Initializing LocalClusterBackend Backend.')\n silence_function(1, super().__init__, *args, **kwargs)\n self.manager.start()\n\n self.spawn_client(cores, hang=False, silence_level=1)\n \n dispatcher = MulticoreDispatcher(1) #Negligible monitoring core\n dispatcher.run(\n silence_function, 1,\n LocalClusterBackend._monitor_active_tasks, *self.manager_creds\n )\n self.dispatchers.append(dispatcher)",
"def __init__(self, context):\n super(CollectingState, self).__init__(context)\n self._ops_list = []\n self._ops_list += State.trans_stores\n self._inner_state = \"init\"",
"def initialize(self, context):\n raise NotImplementedError",
"def Cluster(request, io_loop):\n\n def ClusterConstructor(**kwargs):\n log = logging.getLogger(__file__)\n log.setLevel(logging.DEBUG)\n log.handlers = [logging.StreamHandler(sys.stdout)]\n kwargs['log'] = log\n engine_launcher_class = kwargs.get(\"engine_launcher_class\")\n\n if (\n isinstance(engine_launcher_class, str)\n and \"MPI\" in engine_launcher_class\n and shutil.which(\"mpiexec\") is None\n ):\n pytest.skip(\"requires mpiexec\")\n\n cfg = kwargs.setdefault(\"config\", Config())\n cfg.EngineLauncher.engine_args = ['--log-level=10']\n cfg.ControllerLauncher.controller_args = ['--log-level=10']\n kwargs.setdefault(\"controller_args\", ['--ping=250'])\n\n c = cluster.Cluster(**kwargs)\n assert c.config is cfg\n request.addfinalizer(c.stop_cluster_sync)\n return c\n\n yield ClusterConstructor",
"def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance"
] |
[
"0.88289833",
"0.7161641",
"0.59363616",
"0.5745033",
"0.5745033",
"0.5690117",
"0.56848526",
"0.568348",
"0.56026167",
"0.5602479",
"0.5587021",
"0.556977",
"0.5519842",
"0.5515008",
"0.547153",
"0.5434008",
"0.54178995",
"0.53875893",
"0.53501666",
"0.5329567",
"0.53131104",
"0.5306695",
"0.52731794",
"0.5263299",
"0.52630234",
"0.52444804",
"0.52171826",
"0.5174377",
"0.5165401",
"0.51447463"
] |
0.86212176
|
1
|
Initialize an AKSAgentPoolAddDecorator object to assemble the AgentPool profile.
|
def init_agentpool_decorator_context(self) -> None:
self.agentpool_decorator = AKSAgentPoolUpdateDecorator(
self.cmd, self.client, self.__raw_parameters, self.resource_type, self.agentpool_decorator_mode
)
self.agentpool_context = self.agentpool_decorator.context
self.context.attach_agentpool_context(self.agentpool_context)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init_agentpool_decorator_context(self) -> None:\n self.agentpool_decorator = AKSAgentPoolAddDecorator(\n self.cmd, self.client, self.__raw_parameters, self.resource_type, self.agentpool_decorator_mode\n )\n self.agentpool_context = self.agentpool_decorator.context\n self.context.attach_agentpool_context(self.agentpool_context)",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()",
"def pre_loadbalancer_pool_create(self, resource_dict):\n pass",
"def post_loadbalancer_pool_create(self, resource_dict):\n pass",
"def create_pool(self, argu):\n\n if not argu:\n LOG.error(\"In create_pool, it should not pass the None.\")\n\n cmd_apv_create_group = ADCDevice.create_group(argu['pool_id'], argu['lb_algorithm'], argu['session_persistence_type'])\n for base_rest_url in self.base_rest_urls:\n self.run_cli_extend(base_rest_url, cmd_apv_create_group)\n\n # create policy\n self._create_policy(argu['pool_id'],\n argu['listener_id'],\n argu['session_persistence_type'],\n argu['lb_algorithm'],\n argu['cookie_name']\n )",
"def attach_agentpool_context(self, agentpool_context: AKSAgentPoolContext) -> None:\n if self.agentpool_context is None:\n self.agentpool_context = agentpool_context\n else:\n msg = \"the same\" if self.agentpool_context == agentpool_context else \"different\"\n raise CLIInternalError(\n \"Attempting to attach the `agentpool_context` object again, the two objects are {}.\".format(\n msg\n )\n )",
"def __init__(self, poolbot):\n self.poolbot = poolbot",
"def __init__(__self__,\n resource_name: str,\n args: RegistryAgentPoolArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)",
"def add_to_pool(self):\n if self.check_pool():\n for func in self.getter._func:\n proxies = self.getter.get_proxies(func)\n for proxy in proxies:\n self.conn.push_to_right(proxy)\n else:\n print('Pool reached max capacity')",
"def __init__(__self__, *,\n pool_size: Optional[pulumi.Input['BuildServiceAgentPoolSizePropertiesArgs']] = None):\n if pool_size is not None:\n pulumi.set(__self__, \"pool_size\", pool_size)",
"def __init__(__self__,\n resource_name: str,\n args: Optional[TargetPoolArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(self, model: Module, settings: PGActorSettings) -> None:\n super().__init__(settings)\n\n final_layer, self.network = finalize_module(model, from_numpy(self.state_space.sample()),\n self._num_policy_params)\n self.settings.optimizer.add_param_group({\"params\": final_layer.parameters()})",
"def __init__(self, *args):\n _snap.TBigStrPool_swiginit(self, _snap.new_TBigStrPool(*args))",
"def __init__(__self__, *,\n autoprovisioned: Optional[pulumi.Input[bool]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n location_policy: Optional[pulumi.Input['NodePoolAutoscalingLocationPolicy']] = None,\n max_node_count: Optional[pulumi.Input[int]] = None,\n min_node_count: Optional[pulumi.Input[int]] = None,\n total_max_node_count: Optional[pulumi.Input[int]] = None,\n total_min_node_count: Optional[pulumi.Input[int]] = None):\n if autoprovisioned is not None:\n pulumi.set(__self__, \"autoprovisioned\", autoprovisioned)\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if location_policy is not None:\n pulumi.set(__self__, \"location_policy\", location_policy)\n if max_node_count is not None:\n pulumi.set(__self__, \"max_node_count\", max_node_count)\n if min_node_count is not None:\n pulumi.set(__self__, \"min_node_count\", min_node_count)\n if total_max_node_count is not None:\n pulumi.set(__self__, \"total_max_node_count\", total_max_node_count)\n if total_min_node_count is not None:\n pulumi.set(__self__, \"total_min_node_count\", total_min_node_count)",
"def __init__(self, args, number_of_labels, number_of_features,adj):\n super(SpGAT, self).__init__()\n self.args=args\n \n self.number_of_labels = number_of_labels\n self.number_of_features = number_of_features\n self.device = args.device\n self.adj= sparse_mx_to_torch_sparse_tensor(adj).to(self.device).to_dense()\n self.attentions = [SpGraphAttentionLayer(number_of_features, \n args.hidden, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=True) for _ in range(args.nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(args.hidden * args.nheads, \n args.Q, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=False)",
"def __init__(self, *args):\n _snap.TStrPool_swiginit(self, _snap.new_TStrPool(*args))",
"def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool",
"def __init__(self, module, poolclass=QueuePool, **kw):\n\n self.module = module\n self.kw = kw\n self.poolclass = poolclass\n self.pools = {}\n self._create_pool_mutex = threading.Lock()",
"def __init__(self, pool_size):\n \n self.pool_size=pool_size;",
"def __init__(__self__, *,\n allocated_outbound_ports: Optional[pulumi.Input[int]] = None,\n backend_pool_type: Optional[pulumi.Input[Union[str, 'BackendPoolType']]] = None,\n effective_outbound_ips: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]] = None,\n enable_multiple_standard_load_balancers: Optional[pulumi.Input[bool]] = None,\n idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,\n managed_outbound_ips: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']] = None,\n outbound_ip_prefixes: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']] = None,\n outbound_ips: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']] = None):\n if allocated_outbound_ports is None:\n allocated_outbound_ports = 0\n if allocated_outbound_ports is not None:\n pulumi.set(__self__, \"allocated_outbound_ports\", allocated_outbound_ports)\n if backend_pool_type is None:\n backend_pool_type = 'NodeIPConfiguration'\n if backend_pool_type is not None:\n pulumi.set(__self__, \"backend_pool_type\", backend_pool_type)\n if effective_outbound_ips is not None:\n pulumi.set(__self__, \"effective_outbound_ips\", effective_outbound_ips)\n if enable_multiple_standard_load_balancers is not None:\n pulumi.set(__self__, \"enable_multiple_standard_load_balancers\", enable_multiple_standard_load_balancers)\n if idle_timeout_in_minutes is None:\n idle_timeout_in_minutes = 30\n if idle_timeout_in_minutes is not None:\n pulumi.set(__self__, \"idle_timeout_in_minutes\", idle_timeout_in_minutes)\n if managed_outbound_ips is not None:\n pulumi.set(__self__, \"managed_outbound_ips\", managed_outbound_ips)\n if outbound_ip_prefixes is not None:\n pulumi.set(__self__, \"outbound_ip_prefixes\", outbound_ip_prefixes)\n if outbound_ips is not None:\n pulumi.set(__self__, \"outbound_ips\", outbound_ips)",
"def addpool(miner: Miner, pool):\n api = MinerApi(host=miner.ipaddress, port=int(miner.port))\n jaddpool = api.addpool(\"{0},{1},{2}\".format(pool.url, pool.user, \"x\"))\n return jaddpool[\"STATUS\"][0][\"Msg\"]",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat, \n nhid, \n dropout=dropout, \n alpha=alpha, \n concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads, \n nclass, \n dropout=dropout, \n alpha=alpha, \n concat=False)",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat, \n nhid, \n dropout=dropout, \n alpha=alpha, \n concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads, \n nclass, \n dropout=dropout, \n alpha=alpha, \n concat=False)",
"def __init__(self, pool_size: float = 10):\n self.pool_size = pool_size",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat,\n nhid,\n dropout=dropout,\n alpha=alpha,\n concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads,\n nclass,\n dropout=dropout,\n alpha=alpha,\n concat=False)",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n super(SpGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [SpGraphAttentionLayer(nfeat,\n nhid,\n dropout=dropout,\n alpha=alpha,\n concat=True) for _ in range(nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(nhid * nheads,\n nclass,\n dropout=dropout,\n alpha=alpha,\n concat=False)",
"def post(self, request):\n kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),\n 'listener_id': request.DATA.get('parentResourceId')}\n return create_pool(request, **kwargs)",
"def add_pool(ctx, pool_name, global_ip_range, global_port_range):\n\n if len(pool_name) > 32:\n ctx.fail(\"Invalid pool name. Maximum allowed pool name is 32 characters !!\")\n\n # Verify the ip address range and format\n ip_address = global_ip_range.split(\"-\")\n if len(ip_address) > 2:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n elif len(ip_address) == 2:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n\n if is_valid_ipv4_address(ip_address[1]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[1]))\n\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))\n if ipLowLimit >= ipHighLimit:\n ctx.fail(\"Given ip address range {} is invalid. Please enter a valid ip address range !!\".format(global_ip_range))\n else:\n if is_valid_ipv4_address(ip_address[0]) is False:\n ctx.fail(\"Given ip address {} is not valid global address. Please enter a valid ip address !!\".format(ip_address[0]))\n ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))\n ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))\n\n # Verify the port address range and format\n if global_port_range is not None: \n port_address = global_port_range.split(\"-\")\n\n if len(port_address) > 2:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n elif len(port_address) == 2:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[0]))\n\n if is_valid_port_address(port_address[1]) is False:\n ctx.fail(\"Given port value {} is invalid. Please enter a valid port value !!\".format(port_address[1]))\n\n portLowLimit = int(port_address[0])\n portHighLimit = int(port_address[1])\n if portLowLimit >= portHighLimit:\n ctx.fail(\"Given port address range {} is invalid. Please enter a valid port address range !!\".format(global_port_range))\n else:\n if is_valid_port_address(port_address[0]) is False:\n ctx.fail(\"Given port value {} is invalid. 
Please enter a valid port value !!\".format(port_address[0]))\n else:\n global_port_range = \"NULL\"\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n entryFound = False\n table = \"NAT_POOL\"\n key = pool_name\n dataKey1 = 'nat_ip'\n dataKey2 = 'nat_port'\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:\n click.echo(\"Trying to add pool, which is already present.\")\n entryFound = True\n\n pool_dict = config_db.get_table(table) \n if len(pool_dict) == 16:\n click.echo(\"Failed to add pool, as already reached maximum pool limit 16.\")\n entryFound = True\n\n # Verify the Ip address is overlapping with any Static NAT entry\n if entryFound == False:\n static_dict = config_db.get_table('STATIC_NAT')\n if static_dict:\n for staticKey, staticValues in static_dict.items():\n global_ip = \"---\"\n local_ip = \"---\"\n nat_type = \"dnat\"\n\n if isinstance(staticKey, str) is True:\n global_ip = staticKey\n else:\n continue\n\n local_ip = staticValues[\"local_ip\"]\n\n if \"nat_type\" in staticValues:\n nat_type = staticValues[\"nat_type\"]\n\n if nat_type == \"snat\":\n global_ip = local_ip\n\n ipAddress = int(ipaddress.IPv4Address(global_ip))\n if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):\n ctx.fail(\"Given Ip address entry is overlapping with existing Static NAT entry !!\")\n\n if entryFound == False:\n config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range})"
] |
[
"0.82316643",
"0.61094487",
"0.58690387",
"0.58118975",
"0.5642255",
"0.5622449",
"0.5469193",
"0.5461134",
"0.5350976",
"0.5324465",
"0.5300219",
"0.52914625",
"0.5236859",
"0.5222781",
"0.51754194",
"0.51687443",
"0.51389706",
"0.51012224",
"0.5097092",
"0.50748473",
"0.50532436",
"0.5051007",
"0.5031677",
"0.50305253",
"0.50305253",
"0.5022982",
"0.49977764",
"0.49977764",
"0.49700692",
"0.49682108"
] |
0.7490148
|
1
|
Helper function to check whether any parameters are set. If the values of all the parameters are the default values, the command execution will be terminated early and raise a RequiredArgumentMissingError. Neither the request to fetch nor the request to update the ManagedCluster object will be sent.
|
def check_raw_parameters(self):
# exclude some irrelevant or mandatory parameters
excluded_keys = ("cmd", "client", "resource_group_name", "name")
# check whether the remaining parameters are set
# the default value None or False (and other empty values, like empty string) will be considered as not set
is_changed = any(v for k, v in self.context.raw_param.items() if k not in excluded_keys)
# special cases
# some parameters support the use of empty string or dictionary to update/remove previously set values
is_default = (
self.context.get_cluster_autoscaler_profile() is None and
self.context.get_api_server_authorized_ip_ranges() is None and
self.context.get_nodepool_labels() is None and
self.context.get_nodepool_taints() is None
)
if not is_changed and is_default:
        reconcilePrompt = 'no argument specified to update, would you like to reconcile to current settings?'
if not prompt_y_n(reconcilePrompt, default="n"):
            # Automatically generate the error message from the remaining parameter names.
option_names = [
'"{}"'.format(format_parameter_name_to_option_name(x))
for x in self.context.raw_param.keys()
if x not in excluded_keys
]
error_msg = "Please specify one or more of {}.".format(
" or ".join(option_names)
)
raise RequiredArgumentMissingError(error_msg)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_required(self):\n for argument in self.arguments:\n if argument.required:\n raise ArgumentRequiredError(argument, self.tagname)\n else:\n self.kwargs[argument.name] = argument.get_default()",
"def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")",
"def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)",
"def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatGnomv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.inputCurve, \"No input Curve file provided\")",
"def _check_params(self):\n pass",
"def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))",
"def check_mandatory(params: Dict[str, str]):\n for key, val in params.items():\n if val is None or val == '':\n raise ValueError(f'Missing mandatory param: `{key}`.')",
"def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginControlAbsorptionv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")",
"def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)",
"def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True",
"def params_optional(self) -> bool:\n result = True\n if self.no_params:\n # We will return False, because there are no params at all - optional or not.\n return False\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n # We should allow you to print out the options to a YAML file and fill it out like a form.\n # So right now, it will create a long Kubernetes policy, but it will have lots of empty lists that we have to fill out. Oh well.\n if not parameter_details.default_value:\n # if not parameter.default_value and parameter.default_value != [] and parameter.default_value != \"\":\n result = False\n break\n return result",
"def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatcmpv2_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().inputCurve, \"No input 1D curves file provided\")",
"def checkParameters(self):\n self.DEBUG(\"EDPluginWaitMultiFile.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedFile, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedSize, \"Data Input is None\")",
"def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))",
"def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True",
"def checkParameters(self):\n self.DEBUG(\"EDPluginExecDamstartv0_3.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.getInputPdbFile(), \"No template file specified\")",
"def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val",
"def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True",
"def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True",
"def validate_input_params(self):\n if isinstance(self.parameters, dict):\n # Setup the mandatory params for snowflake load\n mandatory_keys = ('load_type', 'hive_database', 'hive_table', 'sfSchema', 'sfTable', 'sfGrantee_roles')\n if not all(key in self.parameters for key in mandatory_keys):\n logging.info(\"Mandatory keys for GenieSnowflakeOperator(parameters): %s\\n\" % format(mandatory_keys))\n logging.error(\"Mandatory key(s) NOT exists in GenieSnowflakeOperator(parameters): %s\\n\" % format(self.parameters))\n raise Exception(\"Job failed\")\n\n # Setting up pre,post and grants scripts for snowflake\n self.sfPresteps_sql = self.parameters.get('sfPresteps_sql', self.sfPresteps_sql)\n self.sfPoststeps_sql = self.parameters.get('sfPoststeps_sql', self.sfPoststeps_sql)\n self.sfPostgrants_sql = self.parameters.get('sfPostgrants_sql', self.sfPostgrants_sql)\n else:\n logging.error(\"Input is NOT a dictionary: %s\\n\" % format(self.parameters))\n raise Exception(\"Job failed\")",
"def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)",
"def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))",
"def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecProcessScriptFindv0_1.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(),\"Data Input is None\")\n \n self.__searchPath = self.getDataInput().getSearchPath().getValue() \n self.__inputString = self.getDataInput().getInputString().getValue()",
"def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:\n pass",
"def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)",
"def __verify_required_parameters(self, parameters, required_parameters):\n\n\t\tfor parameter in required_parameters:\n\t\t\tif False == parameters.has_key(parameter):\n\t\t\t\traise MissingParameterError(parameter)\n\n\t\treturn True",
"def check_params(self):\n raise NotImplementedError",
"def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True",
"def cmd_needs_no_arg(self):\n self.respond(\"501 Syntax error: command does not accept arguments.\")",
"def check_arguments(self):\n # only four test operation is permitted, if given anything apart from this,\n # then it should print error message.\n if not (\n (self.args.file is None)\n and ((self.args.testfiles is None or self.args.hostname is None))\n ):\n action = None\n if self.set_action_cmd(action) is not None:\n # the operation is checked in above function\n return None\n\n self.logger.error(\n colorama.Fore.RED\n + \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail,\n )\n self.parser.print_help()\n sys.exit(1)"
] |
[
"0.6678303",
"0.65089667",
"0.6347127",
"0.6260354",
"0.623185",
"0.62158525",
"0.6201601",
"0.6174969",
"0.6122561",
"0.61169016",
"0.6064124",
"0.59828496",
"0.59544414",
"0.5933564",
"0.5917931",
"0.5912249",
"0.5910069",
"0.5893333",
"0.5893333",
"0.5892044",
"0.58627796",
"0.5858547",
"0.58373356",
"0.58371836",
"0.5823718",
"0.58113277",
"0.5808003",
"0.58056504",
"0.57835466",
"0.5777503"
] |
0.6989657
|
0
|
Internal function to ensure that the incoming `mc` object is valid and the same as the attached `mc` object in the context. If the incoming `mc` is not valid or is inconsistent with the `mc` in the context, raise a CLIInternalError.
|
def _ensure_mc(self, mc: ManagedCluster) -> None:
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
if self.context.mc != mc:
raise CLIInternalError(
"Inconsistent state detected. The incoming `mc` is not the same as the `mc` in the context."
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _ensure_mc(self, mc: ManagedCluster) -> None:\n if not isinstance(mc, self.models.ManagedCluster):\n raise CLIInternalError(\n \"Unexpected mc object with type '{}'.\".format(type(mc))\n )\n\n if self.context.mc != mc:\n raise CLIInternalError(\n \"Inconsistent state detected. The incoming `mc` \"\n \"is not the same as the `mc` in the context.\"\n )",
"def _verifyConversion(self, imc_in, imc_out=None):\n if imc_in != const.IMC.GREATER_OF_TWO_HORIZONTAL and \\\n imc_in != const.IMC.MEDIAN_HORIZONTAL and \\\n imc_in != const.IMC.GMRotI50 and \\\n imc_in != const.IMC.RotD50 and \\\n imc_in != const.IMC.RANDOM_HORIZONTAL and \\\n imc_in != const.IMC.HORIZONTAL:\n raise ValueError('unknown IMC %r' % imc_in)",
"def attach_existing_mc(self, mc: ManagedCluster) -> None:\n if self.__existing_mc is None:\n self.__existing_mc = mc\n else:\n msg = \"the same\" if self.__existing_mc == mc else \"different\"\n raise CLIInternalError(\n \"Attempting to attach the existing `mc` object again, the two objects are {}.\".format(\n msg\n )\n )",
"def validate(self):\n if not self.hmc_address:\n raise ValueError(\"No HMC address provided\")\n if (not self.credentials['user']\n or not self.credentials['password']):\n raise ValueError(\n \"No CPC credentials set. Please provide 'admin-user' and \"\n \"'admin-password' in hypervisor profile\")\n if not self.boot_options:\n raise ValueError(\n \"No CPC boot method configured. Please set \"\n \"'liveimg-insfile-url' in CPC profile parameters or \"\n \"attach a volume with live image\")",
"def attach_mc(self, mc: ManagedCluster) -> None:\n if self.decorator_mode == DecoratorMode.UPDATE:\n self.attach_existing_mc(mc)\n\n if self.mc is None:\n self.mc = mc\n else:\n msg = \"the same\" if self.mc == mc else \"different\"\n raise CLIInternalError(\n \"Attempting to attach the `mc` object again, the two objects are {}.\".format(\n msg\n )\n )",
"def _check_validity(self):\n pass",
"def __verify_arguments(self):\n if len(self.__pointer_data) == 0:\n raise ValueError(\n \"Input data is empty (size: '%d').\" % len(self.__pointer_data)\n )\n\n if self.__number_clusters <= 0:\n raise ValueError(\n \"Amount of cluster (current value: '%d') for allocation should be greater than 0.\"\n % self.__number_clusters\n )\n\n if self.__numlocal < 0:\n raise ValueError(\n \"Local minima (current value: '%d') should be greater or equal to 0.\"\n % self.__numlocal\n )\n\n if self.__maxneighbor < 0:\n raise ValueError(\n \"Maximum number of neighbors (current value: '%d') should be greater or \"\n \"equal to 0.\" % self.__maxneighbor\n )",
"def _assert_valid_mu(self, mu):\n cov = self._cov\n if mu.dtype != cov.dtype:\n raise TypeError(\n \"mu and cov must have the same dtype. Found mu.dtype = %s, \"\n \"cov.dtype = %s\" % (mu.dtype, cov.dtype))\n\n # Try to validate with static checks.\n mu_shape = mu.get_shape()\n cov_shape = cov.get_shape()\n if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():\n if mu_shape != cov_shape[:-1]:\n raise ValueError(\n \"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, \"\n \"cov.shape=%s\" % (mu_shape, cov_shape))\n else:\n return mu\n\n # Static checks could not be run, so possibly do dynamic checks.\n if not self.validate_args:\n return mu\n else:\n assert_same_rank = check_ops.assert_equal(\n array_ops.rank(mu) + 1,\n cov.rank(),\n data=[\"mu should have rank 1 less than cov. Found: rank(mu) = \",\n array_ops.rank(mu), \" rank(cov) = \", cov.rank()],\n )\n with ops.control_dependencies([assert_same_rank]):\n assert_same_shape = check_ops.assert_equal(\n array_ops.shape(mu),\n cov.vector_shape(),\n data=[\"mu.shape and cov.shape[:-1] should match. \"\n \"Found: shape(mu) = \"\n , array_ops.shape(mu), \" shape(cov) = \", cov.shape()],\n )\n return control_flow_ops.with_dependencies([assert_same_shape], mu)",
"def __verify_arguments(self):\r\n if len(self.__pointer_data) == 0:\r\n raise ValueError(\"Input data is empty (size: '%d').\" % len(self.__pointer_data))\r\n\r\n if self.__number_clusters <= 0:\r\n raise ValueError(\"Amount of cluster (current value: '%d') for allocation should be greater than 0.\" %\r\n self.__number_clusters)\r\n\r\n if self.__numlocal < 0:\r\n raise ValueError(\"Local minima (current value: '%d') should be greater or equal to 0.\" % self.__numlocal)\r\n\r\n if self.__maxneighbor < 0:\r\n raise ValueError(\"Maximum number of neighbors (current value: '%d') should be greater or \"\r\n \"equal to 0.\" % self.__maxneighbor)",
"def sanity_check(self):\n pass",
"def test_verify_mol_input():\n # good input with extra spaces\n mol_input_dict = read_mol_input(os.path.join(TEST_DIR, \"example2_mol_input_file.txt\"))\n verify_mol_input(mol_input_dict)\n\n # check the good file\n # {'qcm': 'hf', 'basis': 'sto-3g', 'struct_input': 'C=CC=C',\n # 'struct_type': 'smiles', 'prog': 'psi4', 'charge': '0', 'multip': '1'}\n mol_input_dict = read_mol_input(os.path.join(TEST_DIR, \"example_mol_input_file.txt\"))\n verify_mol_input(mol_input_dict)\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n # struct input is spelt wrong\n mol_input_dict2 = read_mol_input(os.path.join(TEST_DIR, \"bad3_mol_input_file.txt\"))\n assert_raises(ValueError, verify_mol_input, mol_input_dict2)\n\n mol_input_dict[\"qcm\"] = \"not-a-method\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"qcm\"] = \"hf\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"basis\"] = \"not-a-basis\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"basis\"] = \"sto-3g\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"struct_input\"] = \"very-bad-smiles-string\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"struct_input\"] = \"C=CC=C\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"struct_type\"] = \"not-an-option\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"struct_type\"] = \"smiles\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"prog\"] = \"unavailable-prog\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"prog\"] = \"psi4\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"charge\"] = \"0.34\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"charge\"] = \"0\"\n # reset all params that are modified by verification\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"multip\"] = \"-2\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)",
"def test_set_molecule_error(self):\n mol = Molecule.from_smiles(\"CCO\")\n atom = Atom(6, 0, False)\n atom.molecule = mol\n with pytest.raises(AssertionError, match=\"already has an associated molecule\"):\n atom.molecule = mol",
"def _validate_compatibility(self):\r\n for dm in self.DistanceMatrices:\r\n for samp_id in dm.ids:\r\n if samp_id not in self.MetadataMap.SampleIds:\r\n raise ValueError(\"The sample ID '%s' was not found in the \"\r\n \"metadata map.\" % samp_id)\r\n for cat in self.Categories:\r\n if cat not in self.MetadataMap.CategoryNames:\r\n raise ValueError(\"The category '%s' was not found in the \"\r\n \"metadata map.\" % cat)",
"def sanity_check(self):\n return True",
"def _check_consistency(message: Message, to: str, sender: str) -> Message:\n if message.has_to:\n enforce(\n message.to == to, \"To specified on message does not match envelope.\"\n )\n else:\n message.to = to\n if message.has_sender:\n enforce(\n message.sender == sender,\n \"Sender specified on message does not match envelope.\",\n )\n else:\n message.sender = sender\n return message",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def raise_if_inconsistent(self):\n state = VMStateInconsistent()\n state.qemu = self.qemu.is_running()\n state.proc = bool(self.qemu.proc())\n state.ceph_lock = self.ceph.locked_by_me()\n self.log.debug(\n \"check-state-consistency\",\n is_consistent=state.is_consistent(),\n qemu=state.qemu,\n proc=state.proc,\n ceph_lock=state.ceph_lock,\n )\n if not state.is_consistent():\n raise state",
"def test_verifyDamaged(self):\n self.testObject.content.setContent('garbage!')\n self.assertRaises(CorruptObject, self.testObject.verify)",
"def test_validate_compatibility(self):\r\n self.assertEqual(self.cs_overview._validate_compatibility(), None)\r\n\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview._validate_compatibility)\r\n\r\n self.cs_overview.DistanceMatrices = [self.overview_dm]\r\n self.cs_overview.MetadataMap = self.test_map\r\n self.assertRaises(ValueError, self.cs_overview._validate_compatibility)",
"def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)",
"def check_validity(self):",
"def test_machine_check(self):\n with raises(SyntaxError):\n self.machine.check()\n self.machine.add_state('0 ,R, ,R, ,R, a,N,!')\n self.machine.check() # without exception\n self.machine.add_state('1 ,L, ,L, ,L, a,R,2')\n with raises(SyntaxError):\n self.machine.check()\n self.machine.add_state('2 ,L, ,L, ,L, a,R,2')\n self.machine.check() # without exception\n\n with raises(SyntaxError):\n self.machine = Machine(['a'])\n\n self.machine = Machine(['a', '_'])\n self.machine.add_state('0 ,L, ,R,')\n with raises(SyntaxError):\n self.machine.check()",
"def test_object_provision_command_when_invalid_arguments_provided(mock_client):\n from IllumioCore import object_provision_command\n\n args = {\"security_policy_objects\": \"\"}\n err_msg = (\n \"security_policy_objects is a required parameter. Please provide correct value.\"\n )\n\n with pytest.raises(ValueError) as err:\n object_provision_command(mock_client, args)\n\n assert str(err.value) == err_msg",
"def validate_collision(self):\n pass",
"def check_errors(self) -> None:",
"def __validate():\n # TODO: implement",
"def _validate_materials(self):\n if not isinstance(self.materials, dict):\n raise securesystemslib.exceptions.FormatError(\n \"Invalid Link: field `materials` must be of type dict, got: {}\"\n .format(type(self.materials)))\n\n for material in list(self.materials.values()):\n securesystemslib.formats.HASHDICT_SCHEMA.check_match(material)",
"def _verify(self):\n pass",
"def validate(self):\n\n # Check if motherboard record exists\n motherboard_record_exists = False\n board_info_records = self.groups[constants.RecordType.BASEBOARD_RECORD]\n for handle_id in board_info_records:\n record = self.records[handle_id]\n if 'Type' in record.props and record.props['Type'].val == 'Motherboard':\n motherboard_record_exists = True\n break\n if not motherboard_record_exists:\n self.err_msgs['Motherboard SMBIOS record is missing.'] = (\n 'There should be at least one structure defining the motherboard '\n '(Board Type: 0xA).')\n\n return self.err_msgs",
"def assert_goodness(self):\n if self._setted:\n self.assert_stored_iss()\n self.assert_stored_ks()\n ## Check idxs\n self.assert_stored_idxs()\n ## Check sp_relative_pos\n self.assert_stored_sp_rel_pos()"
] |
[
"0.8108351",
"0.5865274",
"0.5721774",
"0.5506098",
"0.5488774",
"0.5384116",
"0.5317153",
"0.5299704",
"0.52886504",
"0.52636874",
"0.52195853",
"0.5134537",
"0.5118759",
"0.51167303",
"0.50530046",
"0.5004237",
"0.50021195",
"0.49870652",
"0.4983264",
"0.49633512",
"0.49613866",
"0.4939211",
"0.49313352",
"0.4909964",
"0.49053776",
"0.48865676",
"0.48826352",
"0.4881224",
"0.48626867",
"0.48503193"
] |
0.8137792
|
0
|
Get the ManagedCluster object currently in use and attach it to the internal context. Internally, send the request using ContainerServiceClient with the cluster name and resource group name as parameters.
|
def fetch_mc(self) -> ManagedCluster:
mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())
# attach mc to AKSContext
self.context.attach_mc(mc)
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def make_cluster_context(cluster, show_deleted=False):\n context = RequestContext(user_name=cluster.trustee_username,\n password=cluster.trustee_password,\n trust_id=cluster.trust_id,\n show_deleted=show_deleted,\n user_domain_id=CONF.trust.trustee_domain_id,\n user_domain_name=CONF.trust.trustee_domain_name)\n return context",
"def get_cluster(cluster_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:\n __args__ = dict()\n __args__['clusterId'] = cluster_id\n __args__['location'] = location\n __args__['project'] = project\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = _utilities.get_version()\n __ret__ = pulumi.runtime.invoke('google-native:container/v1:getCluster', __args__, opts=opts, typ=GetClusterResult).value\n\n return AwaitableGetClusterResult(\n addons_config=__ret__.addons_config,\n authenticator_groups_config=__ret__.authenticator_groups_config,\n autopilot=__ret__.autopilot,\n autoscaling=__ret__.autoscaling,\n binary_authorization=__ret__.binary_authorization,\n cluster_ipv4_cidr=__ret__.cluster_ipv4_cidr,\n conditions=__ret__.conditions,\n confidential_nodes=__ret__.confidential_nodes,\n create_time=__ret__.create_time,\n current_master_version=__ret__.current_master_version,\n current_node_version=__ret__.current_node_version,\n database_encryption=__ret__.database_encryption,\n default_max_pods_constraint=__ret__.default_max_pods_constraint,\n description=__ret__.description,\n enable_kubernetes_alpha=__ret__.enable_kubernetes_alpha,\n enable_tpu=__ret__.enable_tpu,\n endpoint=__ret__.endpoint,\n expire_time=__ret__.expire_time,\n initial_cluster_version=__ret__.initial_cluster_version,\n ip_allocation_policy=__ret__.ip_allocation_policy,\n label_fingerprint=__ret__.label_fingerprint,\n legacy_abac=__ret__.legacy_abac,\n location=__ret__.location,\n locations=__ret__.locations,\n logging_config=__ret__.logging_config,\n logging_service=__ret__.logging_service,\n maintenance_policy=__ret__.maintenance_policy,\n master_auth=__ret__.master_auth,\n master_authorized_networks_config=__ret__.master_authorized_networks_config,\n mesh_certificates=__ret__.mesh_certificates,\n monitoring_config=__ret__.monitoring_config,\n monitoring_service=__ret__.monitoring_service,\n name=__ret__.name,\n network=__ret__.network,\n network_config=__ret__.network_config,\n network_policy=__ret__.network_policy,\n node_ipv4_cidr_size=__ret__.node_ipv4_cidr_size,\n node_pools=__ret__.node_pools,\n notification_config=__ret__.notification_config,\n private_cluster_config=__ret__.private_cluster_config,\n release_channel=__ret__.release_channel,\n resource_labels=__ret__.resource_labels,\n resource_usage_export_config=__ret__.resource_usage_export_config,\n self_link=__ret__.self_link,\n services_ipv4_cidr=__ret__.services_ipv4_cidr,\n shielded_nodes=__ret__.shielded_nodes,\n status=__ret__.status,\n subnetwork=__ret__.subnetwork,\n tpu_ipv4_cidr_block=__ret__.tpu_ipv4_cidr_block,\n vertical_pod_autoscaling=__ret__.vertical_pod_autoscaling,\n workload_identity_config=__ret__.workload_identity_config)",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))",
"def run(job=None, logger=None, **kwargs):\n environment = Environment.objects.get(id=ENV_ID)\n\n # Save cluster data on the resource so teardown works later\n create_required_parameters()\n resource = kwargs['resource']\n resource.create_gke_k8s_cluster_env = environment.id\n resource.create_gke_k8s_cluster_name = CLUSTER_NAME\n resource.name = CLUSTER_NAME\n resource.save()\n\n job.set_progress('Connecting to GKE...')\n builder = GKEClusterBuilder(environment, CLUSTER_NAME)\n\n job.set_progress('Sending request for new cluster {}...'.format(CLUSTER_NAME))\n builder.create_cluster(NODE_COUNT)\n\n job.set_progress('Waiting up to {} seconds for provisioning to complete.'\n .format(TIMEOUT))\n start = time.time()\n job.set_progress('Waiting for cluster IP address...')\n endpoint = builder.wait_for_endpoint(timeout=TIMEOUT)\n if not endpoint:\n return (\"FAILURE\",\n \"No IP address returned after {} seconds\".format(TIMEOUT),\n \"\")\n\n remaining_time = TIMEOUT - (time.time() - start)\n job.set_progress('Waiting for nodes to report hostnames...')\n nodes = builder.wait_for_nodes(NODE_COUNT, timeout=remaining_time)\n if len(nodes) < NODE_COUNT:\n return (\"FAILURE\",\n \"Nodes are not ready after {} seconds\".format(TIMEOUT),\n \"\")\n\n job.set_progress('Importing cluster...')\n cluster = builder.get_cluster()\n tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')\n kubernetes = Kubernetes.objects.create(\n name=CLUSTER_NAME,\n ip=cluster['endpoint'],\n port=443,\n protocol='https',\n serviceaccount=cluster['masterAuth']['username'],\n servicepasswd=cluster['masterAuth']['password'],\n container_technology=tech,\n )\n resource.create_gke_k8s_cluster_id = kubernetes.id\n resource.save()\n url = 'https://{}{}'.format(\n PortalConfig.get_current_portal().domain,\n reverse('container_orchestrator_detail', args=[kubernetes.id])\n )\n job.set_progress(\"Cluster URL: {}\".format(url))\n\n job.set_progress('Importing nodes...')\n for node in nodes:\n # Generate libcloud UUID from GCE ID\n id_unicode = '{}:{}'.format(node['id'], 'gce')\n uuid = hashlib.sha1(id_unicode.encode('utf-8')).hexdigest()\n # Create a bbones server record. Other details like CPU and Mem Size\n # will be populated the next time the GCE handler is synced.\n Server.objects.create(\n hostname=node['name'],\n resource_handler_svr_id=uuid,\n environment=environment,\n resource_handler=environment.resource_handler,\n group=resource.group,\n owner=resource.owner,\n )\n\n job.set_progress('Waiting for cluster to report as running...')\n remaining_time = TIMEOUT - (time.time() - start)\n status = builder.wait_for_running_status(timeout=remaining_time)\n if status != 'RUNNING':\n return (\"FAILURE\",\n \"Status is {} after {} seconds (expected RUNNING)\".format(\n status, TIMEOUT),\n \"\")\n\n return (\"SUCCESS\",\n \"Cluster is ready and can be accessed at {}\".format(url),\n \"\")",
"def patch_cluster(self, cluster: Union[dto.Cluster, str]) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def cluster(self):\n return self._cluster",
"def cluster(self):\n return self._cluster",
"def patch_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError",
"def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp",
"def management_cluster(self) -> pulumi.Input['PrivateCloudManagementClusterArgs']:\n return pulumi.get(self, \"management_cluster\")",
"def authenticate_to_kusto_ingress(cluster):\n kcsb = KustoConnectionStringBuilder.with_aad_application_key_authentication(cluster,\n CLIENT_ID,\n CLIENT_SECRET,\n AUTHORITY_ID)\n # The authentication method will be taken from the chosen KustoConnectionStringBuilder.\n kusto_client = QueuedIngestClient(kcsb)\n return kusto_client",
"def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster start failed\")",
"def get(self, request, cluster_id, service_id): # pylint: disable=arguments-differ\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n try:\n obj = self.get_queryset().get(cluster=cluster, id=service_id)\n except ClusterObject.DoesNotExist:\n raise AdcmApiEx('CLUSTER_SERVICE_NOT_FOUND')\n serializer_class = self.select_serializer(request)\n serializer = serializer_class(obj, context={'request': request, 'cluster_id': cluster_id})\n return Response(serializer.data)",
"def cluster(self, cluster):\n if self.local_vars_configuration.client_side_validation and cluster is None: # noqa: E501\n raise ValueError(\"Invalid value for `cluster`, must not be `None`\") # noqa: E501\n\n self._cluster = cluster",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def _get_hosts_with_container(self, context, cluster):\n pass",
"def get(self, request, cluster_id, upgrade_id): # pylint: disable=arguments-differ\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n obj = self.get_queryset().get(id=upgrade_id)\n serializer = self.serializer_class(obj, context={\n 'cluster_id': cluster.id, 'request': request\n })\n return Response(serializer.data)",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def post(self, request, cluster_id):\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n serializer = self.serializer_class(data=request.data, context={\n 'request': request, 'cluster': cluster,\n })\n return create(serializer, id=cluster_id)",
"def management_cluster(self) -> Optional[pulumi.Input['PrivateCloudManagementClusterArgs']]:\n return pulumi.get(self, \"management_cluster\")",
"def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")"
] |
[
"0.63290817",
"0.5981959",
"0.5886816",
"0.58823264",
"0.5875958",
"0.5780633",
"0.5742241",
"0.5623842",
"0.56223327",
"0.5598198",
"0.5598198",
"0.55960774",
"0.5590564",
"0.5503874",
"0.54916924",
"0.54552215",
"0.5428519",
"0.5419511",
"0.5408951",
"0.53970474",
"0.5395541",
"0.53725773",
"0.53725773",
"0.53725773",
"0.53725773",
"0.53725773",
"0.53725773",
"0.5368144",
"0.53667766",
"0.536114"
] |
0.6626831
|
0
|
Update agentpool profile for the ManagedCluster object.
|
def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if not mc.agent_pool_profiles:
raise UnknownError(
"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of "
"updating agentpool profile."
)
agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)
mc.agent_pool_profiles[0] = agentpool_profile
# update nodepool labels for all nodepools
nodepool_labels = self.context.get_nodepool_labels()
if nodepool_labels is not None:
for agent_profile in mc.agent_pool_profiles:
agent_profile.node_labels = nodepool_labels
# update nodepool taints for all nodepools
nodepool_taints = self.context.get_nodepool_taints()
if nodepool_taints is not None:
for agent_profile in mc.agent_pool_profiles:
agent_profile.node_taints = nodepool_taints
return mc
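
A minimal, self-contained sketch of the label/taint fan-out shown above, using hypothetical stand-in dataclasses (FakeAgentPoolProfile, FakeManagedCluster) instead of the real SDK models; it illustrates the update pattern under those assumptions and is not the azure-cli implementation.

from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class FakeAgentPoolProfile:          # stand-in for the SDK's ManagedClusterAgentPoolProfile
    name: str
    node_labels: Optional[Dict[str, str]] = None
    node_taints: Optional[List[str]] = None

@dataclass
class FakeManagedCluster:            # stand-in for the SDK's ManagedCluster
    agent_pool_profiles: List[FakeAgentPoolProfile] = field(default_factory=list)

def apply_nodepool_settings(mc: FakeManagedCluster,
                            labels: Optional[Dict[str, str]] = None,
                            taints: Optional[List[str]] = None) -> FakeManagedCluster:
    # Mirror the fan-out above: when labels/taints are provided, apply them to every pool.
    if labels is not None:
        for pool in mc.agent_pool_profiles:
            pool.node_labels = labels
    if taints is not None:
        for pool in mc.agent_pool_profiles:
            pool.node_taints = taints
    return mc

mc = FakeManagedCluster([FakeAgentPoolProfile("nodepool1"), FakeAgentPoolProfile("nodepool2")])
apply_nodepool_settings(mc, labels={"env": "dev"}, taints=["sku=gpu:NoSchedule"])
assert all(p.node_labels == {"env": "dev"} for p in mc.agent_pool_profiles)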
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_pool(self, pool, body=None):\r\n return self.put(self.pool_path % (pool), body=body)",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_minion_pool():\n pool = fetch_minion_pool()\n save_minion_pool(pool)\n return pool",
"def associate_health_monitor(self, pool, body):\r\n return self.post(self.associate_pool_health_monitors_path % (pool),\r\n body=body)",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_pool(self, context, old_pool, pool):\n old_val, new_val = self.get_diff_of_dict(old_pool, pool)\n LOG.info(\"Received request 'Update Pool' for Pool:%(pool)s \"\n \"in LB:%(lb_id)s with new Param:%(new_val)s and \"\n \"old Param:%(old_val)s\",\n {'pool': pool['id'],\n 'lb_id': pool['loadbalancer_id'],\n 'old_val': old_val,\n 'new_val': new_val})\n arg_dict = {'context': context,\n lb_const.OLD_POOL: old_pool,\n lb_const.POOL: pool,\n }\n self._send_event(lb_const.EVENT_UPDATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def test_update_pool(self):\r\n resource = 'pool'\r\n cmd = pool.UpdatePool(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })"
] |
[
"0.69000655",
"0.64854956",
"0.63575286",
"0.58744913",
"0.5846469",
"0.58256614",
"0.58208615",
"0.571694",
"0.56969243",
"0.5597279",
"0.55756265",
"0.5568938",
"0.55441463",
"0.55437326",
"0.54608375",
"0.5429033",
"0.5364649",
"0.5275306",
"0.5205076",
"0.5190666",
"0.5176698",
"0.51317275",
"0.511006",
"0.5100063",
"0.50945055",
"0.5072052",
"0.5071553",
"0.50215924",
"0.5016511",
"0.5011857"
] |
0.8161614
|
0
|
Update autoscaler profile for the ManagedCluster object.
|
def update_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()
if cluster_autoscaler_profile is not None:
# update profile (may clear profile with empty dictionary)
mc.auto_scaler_profile = cluster_autoscaler_profile
return mc
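
A short sketch of the "None leaves the profile alone, an empty dict clears it" semantics noted in the comment above, using a hypothetical FakeCluster stand-in rather than the real ManagedCluster model.

from typing import Dict, Optional

class FakeCluster:                         # hypothetical stand-in for ManagedCluster
    def __init__(self) -> None:
        self.auto_scaler_profile: Optional[Dict[str, str]] = {"scan-interval": "10s"}

def update_autoscaler(mc: FakeCluster, profile: Optional[Dict[str, str]]) -> FakeCluster:
    # None means "leave as-is"; an empty dict is a deliberate request to clear the profile.
    if profile is not None:
        mc.auto_scaler_profile = profile
    return mc

mc = FakeCluster()
update_autoscaler(mc, None)                # parameter not passed -> untouched
assert mc.auto_scaler_profile == {"scan-interval": "10s"}
update_autoscaler(mc, {})                  # empty dict -> explicitly cleared
assert mc.auto_scaler_profile == {}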
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(enabled=True)\n else:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def get_cluster_autoscaler_profile(self) -> Union[Dict[str, str], None]:\n return self._get_cluster_autoscaler_profile()",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def _get_cluster_autoscaler_profile(self, read_only: bool = False) -> Union[Dict[str, str], None]:\n # read the original value passed by the command\n cluster_autoscaler_profile = self.raw_param.get(\"cluster_autoscaler_profile\")\n # parse and validate user input\n cluster_autoscaler_profile = self.__validate_cluster_autoscaler_profile(cluster_autoscaler_profile)\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.auto_scaler_profile is not None:\n cluster_autoscaler_profile = self.mc.auto_scaler_profile\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return cluster_autoscaler_profile\n\n # dynamic completion for update mode only\n if not read_only and self.decorator_mode == DecoratorMode.UPDATE:\n if cluster_autoscaler_profile and self.mc and self.mc.auto_scaler_profile:\n # shallow copy should be enough for string-to-string dictionary\n copy_of_raw_dict = self.mc.auto_scaler_profile.__dict__.copy()\n new_options_dict = dict(\n (key.replace(\"-\", \"_\"), value)\n for (key, value) in cluster_autoscaler_profile.items()\n )\n copy_of_raw_dict.update(new_options_dict)\n cluster_autoscaler_profile = copy_of_raw_dict\n\n # this parameter does not need validation\n return cluster_autoscaler_profile",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_image_cleaner(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_image_cleaner = self.context.get_enable_image_cleaner()\n disable_image_cleaner = self.context.get_disable_image_cleaner()\n interval_hours = self.context.get_image_cleaner_interval_hours()\n\n # no image cleaner related changes\n if not enable_image_cleaner and not disable_image_cleaner and interval_hours is None:\n return mc\n\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n image_cleaner_profile = mc.security_profile.image_cleaner\n\n if image_cleaner_profile is None:\n image_cleaner_profile = self.models.ManagedClusterSecurityProfileImageCleaner()\n mc.security_profile.image_cleaner = image_cleaner_profile\n\n # init the image cleaner profile\n image_cleaner_profile.enabled = False\n image_cleaner_profile.interval_hours = 7 * 24\n\n if enable_image_cleaner:\n image_cleaner_profile.enabled = True\n\n if disable_image_cleaner:\n image_cleaner_profile.enabled = False\n\n if interval_hours is not None:\n image_cleaner_profile.interval_hours = interval_hours\n\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def auto_scaling(self, auto_scaling):\n\n self.container['auto_scaling'] = auto_scaling",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n # set intermediate\n self.context.set_intermediate(\"azuremonitormetrics_addon_enabled\", True, overwrite_exists=True)\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError"
] |
[
"0.77689654",
"0.77025795",
"0.70554644",
"0.6538188",
"0.62741613",
"0.62367916",
"0.61821556",
"0.60954934",
"0.6095449",
"0.5962787",
"0.5960805",
"0.58981574",
"0.58537835",
"0.582746",
"0.5796772",
"0.57472837",
"0.5672821",
"0.5601652",
"0.5588318",
"0.5538377",
"0.54538774",
"0.5436312",
"0.5424376",
"0.5416659",
"0.5379041",
"0.5333468",
"0.5269032",
"0.5256693",
"0.5208395",
"0.5201541"
] |
0.8491165
|
0
|
Update tags for the ManagedCluster object.
|
def update_tags(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
tags = self.context.get_tags()
if tags is not None:
mc.tags = tags
return mc
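
A sketch of how a tags update of this shape might slot into a take-and-return update chain, assuming hypothetical FakeCluster, update_tags, and run_pipeline names; this is an illustration of the pattern, not the azure-cli decorator itself.

from typing import Callable, Dict, List, Optional

class FakeCluster:                              # hypothetical stand-in for ManagedCluster
    def __init__(self) -> None:
        self.tags: Optional[Dict[str, str]] = None

def update_tags(mc: FakeCluster, tags: Optional[Dict[str, str]]) -> FakeCluster:
    # Same guard as above: only overwrite when the caller actually supplied tags.
    if tags is not None:
        mc.tags = tags
    return mc

def run_pipeline(mc: FakeCluster,
                 steps: List[Callable[[FakeCluster], FakeCluster]]) -> FakeCluster:
    # Each step takes and returns the cluster, mirroring the decorator's update_* chain.
    for step in steps:
        mc = step(mc)
    return mc

mc = run_pipeline(FakeCluster(), [lambda c: update_tags(c, {"team": "platform"})])
assert mc.tags == {"team": "platform"}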
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def AddClusterTags(self, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def cluster(self, clusters):\n cl = KMeansClustering(map(lambda x: (x), range(len(self.__tags))), self.__tagDistance)\n self.__clusters = cl.getclusters(clusters)\n return self.__clusters",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def setTags(self,newtags):\n\t\tself.tags = newtags;",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def clusters(self, clusters):\n\n self._clusters = clusters",
"def clusters(self, clusters):\n\n self._clusters = clusters",
"def clusters(self, clusters):\n\n self._clusters = clusters",
"def updateTag(self, authenticationToken, tag):\r\n pass",
"def modify_cube_tag(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n # build request\n d = {\n \"ProjectId\": self.config.project_id,\n \"Region\": self.config.region,\n }\n req and d.update(req)\n d = apis.ModifyCubeTagRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"ModifyCubeTag\", d, **kwargs)\n return apis.ModifyCubeTagResponseSchema().loads(resp)",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def tags_changed(self, tags):\n pass",
"def modify_devices_tags(self, data):\n data = clean(data, self.tags_parameters)\n return self.put(\"/devices/tags\", data)",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_kubernetes_cluster(\n self,\n cluster: Union[dto.KubernetesCluster, str],\n template: Union[dto.KubernetesClusterTemplate, str]\n ) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def GetClusterTags(self, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"async def update_cached_tags(self, tag_list: list):\n if not self.__is_cache_supported:\n return\n tasks = []\n for tag in tag_list:\n tasks.append(self.update_tag_info(tag[\"id\"], tag[\"id_tag_info\"]))\n await asyncio.gather(*tasks)",
"def merge(self):\n # Find which clusters/rows to merge\n self.segsChanged = True\n tomerge = []\n i = 0\n for cbox in self.cboxes:\n if cbox.checkState() != 0:\n tomerge.append(i)\n i += 1\n print('rows/clusters to merge are:', tomerge)\n if len(tomerge) < 2:\n return\n\n # Generate new class labels\n nclasses = self.nclasses - len(tomerge) + 1\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in tomerge:\n labels.append((c, 0))\n else:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n # print('[old, new] labels')\n labels = dict(labels)\n # print(labels)\n\n keys = [i for i in range(self.nclasses) if i not in tomerge] # the old keys those didn't merge\n # print('old keys left: ', keys)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {0: self.clusters[tomerge[0]]}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before update: ', self.clusters)\n self.clusters = clusters\n print('after update: ', self.clusters)\n\n self.nclasses = nclasses\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n # update the cluster combobox\n #self.cmbUpdateSeg.clear()\n #for x in self.clusters:\n #self.cmbUpdateSeg.addItem(self.clusters[x])\n\n # Clean and redraw\n self.clearButtons()\n self.updateButtons()\n self.completeChanged.emit()"
] |
[
"0.5990642",
"0.59193057",
"0.5559754",
"0.55428",
"0.54464996",
"0.5432885",
"0.5410724",
"0.54037637",
"0.54037637",
"0.54037637",
"0.5357456",
"0.5332834",
"0.53129476",
"0.52917403",
"0.5266721",
"0.5208107",
"0.5159976",
"0.513104",
"0.51148003",
"0.5085101",
"0.50813633",
"0.50640976",
"0.50640976",
"0.50640976",
"0.50640976",
"0.50640976",
"0.50640976",
"0.5043835",
"0.50430393",
"0.50413036"
] |
0.74483514
|
0
|
Attach or detach an ACR for the cluster. The function "ensure_aks_acr" will be called to create or delete an AcrPull role assignment for the ACR; internally it uses AuthorizationManagementClient to send the request.
|
def process_attach_detach_acr(self, mc: ManagedCluster) -> None:
self._ensure_mc(mc)
subscription_id = self.context.get_subscription_id()
assignee, is_service_principal = self.context.get_assignee_from_identity_or_sp_profile()
attach_acr = self.context.get_attach_acr()
detach_acr = self.context.get_detach_acr()
if attach_acr:
self.context.external_functions.ensure_aks_acr(
self.cmd,
assignee=assignee,
acr_name_or_id=attach_acr,
subscription_id=subscription_id,
is_service_principal=is_service_principal,
)
if detach_acr:
self.context.external_functions.ensure_aks_acr(
self.cmd,
assignee=assignee,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True,
is_service_principal=is_service_principal,
)
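
A dependency-free sketch of the dispatch above may help when reasoning about this record: one "ensure" call is issued per requested registry, with detach=True only for the detach path. The helper below is hypothetical (it is not the CLI's ensure_aks_acr, which per the description performs the AcrPull role assignment through AuthorizationManagementClient); the AcrPull role GUID is the well-known built-in role ID, and the scope/ID formats are assumptions for illustration.

import uuid
from typing import List, Optional

ACR_PULL_ROLE_ID = "7f951dda-4ed3-4680-a7ca-43fe172d538d"  # built-in AcrPull role (assumed constant)


def fake_ensure_aks_acr(assignee: str, acr_resource_id: str, subscription_id: str,
                        detach: bool = False) -> dict:
    # Hypothetical stand-in: only records what a real role-assignment call would need.
    role_definition_id = (
        f"/subscriptions/{subscription_id}/providers/Microsoft.Authorization/"
        f"roleDefinitions/{ACR_PULL_ROLE_ID}"
    )
    # Deterministic assignment name so attach and detach can target the same assignment
    # (a design choice for this sketch, not necessarily what the CLI does).
    assignment_name = str(uuid.uuid5(uuid.NAMESPACE_URL, acr_resource_id + assignee))
    return {
        "action": "delete" if detach else "create",
        "scope": acr_resource_id,
        "role_assignment_name": assignment_name,
        "role_definition_id": role_definition_id,
        "principal_id": assignee,
    }


def dispatch_attach_detach(assignee: str, subscription_id: str,
                           attach_acr: Optional[str], detach_acr: Optional[str]) -> List[dict]:
    # Mirrors the control flow above: both flags may be supplied in a single update,
    # and each produces its own call.
    calls = []
    if attach_acr:
        calls.append(fake_ensure_aks_acr(assignee, attach_acr, subscription_id, detach=False))
    if detach_acr:
        calls.append(fake_ensure_aks_acr(assignee, detach_acr, subscription_id, detach=True))
    return calls


if __name__ == "__main__":
    for call in dispatch_attach_detach(
            assignee="00000000-0000-0000-0000-000000000001",
            subscription_id="00000000-0000-0000-0000-000000000002",
            attach_acr="newregistry",
            detach_acr="oldregistry"):
        print(call)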
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_attach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n attach_acr = self.context.get_attach_acr()\n if attach_acr:\n # If enable_managed_identity, attach acr operation will be handled after the cluster is created\n if not self.context.get_enable_managed_identity():\n service_principal_profile = mc.service_principal_profile\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=service_principal_profile.client_id,\n acr_name_or_id=attach_acr,\n # not actually used\n subscription_id=self.context.get_subscription_id(),\n )",
"def get_attach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n attach_acr = self.raw_param.get(\"attach_acr\")\n\n # this parameter does not need dynamic completion\n # validation\n if self.decorator_mode == DecoratorMode.CREATE and attach_acr:\n if self._get_enable_managed_identity(enable_validation=False):\n # Attach acr operation will be handled after the cluster is created\n if self.get_no_wait():\n raise MutuallyExclusiveArgumentError(\n \"When --attach-acr and --enable-managed-identity are both specified, \"\n \"--no-wait is not allowed, please wait until the whole operation succeeds.\"\n )\n else:\n # newly added check, check whether client_id exists before creating role assignment\n service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)\n if not service_principal:\n raise RequiredArgumentMissingError(\n \"No service principal provided to create the acrpull role assignment for acr.\"\n )\n return attach_acr",
"def cluster_znode_acls(cluster_name, znode, headers=None):\n\n _zclient = get_client(cluster_name,\n headers or request.headers)\n if request.method == \"GET\":\n acls = _zclient.get_acls(znode)[0]\n return make_response(str(acls),\n 200)\n\n if request.method == \"PUT\":\n if request.content_type not in [\"text/plain\", \"text/xml\"]:\n return make_response(\"The Content-Type is not supported. \"\n \"Please use text/plain or text/xml. \\n\",\n 406)\n else:\n acls_raw = eval(request.data)\n acls_list = list()\n for _acl_raw in acls_raw:\n _acl_config = wildutils.ACLConfig(_acl_raw)\n acls_list.append(_acl_config.make_acl())\n\n zstat = _zclient.set_acls(znode, acls_list)\n data = {\"znodeStat\": wildutils.convert_zstat(zstat)}\n resp = Response(json.dumps(data),\n status=201,\n mimetype=\"application/json\")\n return resp",
"def enable_service_account(cfg: ElasticBlastConfig):\n dry_run = cfg.cluster.dry_run\n logging.debug(f\"Enabling service account\")\n with TemporaryDirectory() as d:\n set_extraction_path(d)\n rbac_yaml = resource_filename('elastic_blast', 'templates/elb-janitor-rbac.yaml')\n cmd = f\"kubectl --context={cfg.appstate.k8s_ctx} apply -f {rbac_yaml}\"\n if dry_run:\n logging.info(cmd)\n else:\n try:\n safe_exec(cmd)\n except:\n msg = 'ElasticBLAST is missing permissions for its auto-shutdown and cloud job submission feature. To provide these permissions, please run '\n msg += f'gcloud projects add-iam-policy-binding {cfg.gcp.project} --member={cfg.gcp.user} --role=roles/container.admin'\n raise UserReportError(returncode=PERMISSIONS_ERROR, message=msg)",
"def test_deleteAnnounceACP(self) -> None:\n\t\tif TestRemote_Annc.acp is None:\n\t\t\tself.skipTest('acp not found')\n\t\t_, rsc = DELETE(acpURL, ORIGINATOR)\n\t\tself.assertEqual(rsc, RC.deleted)\n\t\t# try to retrieve the announced Node. Should not be found\n\t\tr, rsc = RETRIEVE(f'{REMOTEURL}/~{TestRemote_Annc.remoteAcpRI}', CSEID)\n\t\tself.assertEqual(rsc, RC.notFound)\n\t\tTestRemote_Annc.acp = None\n\t\tTestRemote_Annc.remoteAcpRI = None",
"def attach_AC(self):\n n = self.pC - 1\n self.A[n] = self._mps_AC(self.A[n], self.C)",
"def _reload_acls(self):\n\t\tself.acls = ACLs()",
"def test_install_rbac_1(test_name, cloud_provider, rke_client, kubectl):\n rke_template = 'cluster_install_rbac_1.yml.j2'\n nodes = cloud_provider.create_multiple_nodes(3, test_name)\n create_rke_cluster(rke_client, kubectl, nodes, rke_template)\n\n result = kubectl.create_resourse_from_yml(\n 'resources/k8s_ymls/daemonset_pods_per_node.yml', namespace='default')\n assert result.ok, result.stderr\n kubectl.create_ns('outside-role')\n result = kubectl.create_resourse_from_yml(\n 'resources/k8s_ymls/daemonset_pods_per_node.yml',\n namespace='outside-role')\n assert result.ok, result.stderr\n\n # Create role and rolebinding to user1 in namespace 'default'\n # namespace is coded in role.yml and rolebinding.yml\n result = kubectl.create_resourse_from_yml('resources/k8s_ymls/role.yml')\n assert result.ok, result.stderr\n result = kubectl.create_resourse_from_yml(\n 'resources/k8s_ymls/rolebinding.yml')\n assert result.ok, result.stderr\n\n # verify read in namespace\n admin_call_pods = kubectl.get_resource('pods', namespace='default')\n user_call_pods = kubectl.get_resource(\n 'pods', as_user='user1', namespace='default')\n\n # Make sure the number of pods returned with out user is the same as user\n # for this namespace\n assert len(admin_call_pods['items']) > 0, \"Pods should be greater than 0\"\n assert (len(admin_call_pods['items']) == len(user_call_pods['items'])), (\n \"Did not retrieve correct number of pods for 'user1'. Expected {0},\"\n \"Retrieved {1}\".format(\n len(admin_call_pods['items']), len(user_call_pods['items'])))\n\n # verify restrictions no pods return in get pods in different namespaces\n user_call_pods = kubectl.get_resource(\n 'pods', as_user='user1', namespace='outside-role')\n assert len(user_call_pods['items']) == 0, (\n \"Should not be able to get pods outside of defined user1 namespace\")\n\n # verify create fails as user for any namespace\n result = kubectl.run(test_name + '-pod2', image='nginx', as_user='user1',\n namespace='outside-role')\n assert result.ok is False, (\n \"'user1' should not be able to create pods in other namespaces:\\n{0}\"\n .format(result.stdout + result.stderr))\n assert \"cannot create\" in result.stdout + result.stderr\n\n result = kubectl.run(test_name + '-pod3', image='nginx', as_user='user1',\n namespace='default')\n assert result.ok is False, (\n \"'user1' should not be able to create pods in its own namespace:\\n{0}\"\n .format(result.stdout + result.stderr))\n assert \"cannot create\" in result.stdout + result.stderr\n\n for node in nodes:\n cloud_provider.delete_node(node)",
"def acr(self) -> pulumi.Output[Optional['outputs.ACRResponse']]:\n return pulumi.get(self, \"acr\")",
"async def exchange_acr_refresh_token_for_acr_access_token(\n self,\n service: str,\n scope: str,\n refresh_token: str,\n grant_type: Union[str, \"_models.TokenGrantType\"] = \"refresh_token\",\n **kwargs: Any\n ) -> _models.AcrAccessToken:\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version = kwargs.pop(\"api_version\", _params.pop(\"api-version\", \"2021-07-01\")) # type: str\n content_type = kwargs.pop(\n \"content_type\", _headers.pop(\"Content-Type\", \"application/x-www-form-urlencoded\")\n ) # type: Optional[str]\n cls = kwargs.pop(\"cls\", None) # type: ClsType[_models.AcrAccessToken]\n\n # Construct form data\n _data = {\n \"service\": service,\n \"scope\": scope,\n \"refresh_token\": refresh_token,\n \"grant_type\": grant_type,\n }\n\n request = build_exchange_acr_refresh_token_for_acr_access_token_request(\n api_version=api_version,\n content_type=content_type,\n data=_data,\n headers=_headers,\n params=_params,\n )\n path_format_arguments = {\n \"url\": self._serialize.url(\"self._config.url\", self._config.url, \"str\", skip_quote=True),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore\n\n pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access\n request, stream=False, **kwargs\n )\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize(\"AcrAccessToken\", pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized",
"def test_basic_rebalance(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers)\n self.log.info(\"Manifest #########\\n {0}\".format(json.dumps(self.x509.manifest, indent=4)))\n for server in self.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.servers)\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.servers[0])\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(self.servers[:self.nodes_init],\n self.servers[self.nodes_init:], [])\n\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n self.log.info(\"Checking authentication ...\")\n self.auth(servers=self.servers)\n\n content = self.x509.get_trusted_CAs()\n self.log.info(\"Trusted CAs: {0}\".format(content))\n self.log.info(\"Active Root CAs names {0}\".format(self.x509.root_ca_names))",
"def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0",
"def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0",
"def init_acs_agent(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def enable_acm_fullaccess(self):\n self._request({\"enable-acm-fullaccess\": True})",
"def ac_dc(self, ac_dc):\n\n self._ac_dc = ac_dc",
"def test_retrieveAnnouncedAEwithATwithAA(self) -> None:\n\t\tif TestRemote_Annc.remoteAeRI is None:\n\t\t\tself.skipTest('remote AE.ri not found')\n\t\tr, rsc = RETRIEVE(f'{REMOTEURL}/~{TestRemote_Annc.remoteAeRI}', CSEID)\n\t\tself.assertEqual(rsc, RC.OK)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA'))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/ty'))\n\t\tself.assertEqual(findXPath(r, 'm2m:aeA/ty'), T.AEAnnc)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/ct'))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/lt'))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/et'))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/pi'))\n\t\tself.assertTrue(CSEID.endswith(findXPath(r, 'm2m:aeA/pi')))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/lnk'))\n\t\tself.assertTrue(findXPath(r, 'm2m:aeA/lnk').endswith( findXPath(TestRemote_Annc.ae, 'm2m:ae/ri') ))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/lbl'))\n\t\tself.assertEqual(len(findXPath(r, 'm2m:aeA/lbl')), 1)\n\t\tself.assertIn('aLabel', findXPath(r, 'm2m:aeA/lbl'))\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:aeA/srv'))\t# MA attribute\n\t\tself.assertEqual(findXPath(r, 'm2m:aeA/srv'), [ '3' ])",
"def test_createAnnounceAEwithATwithAA(self) -> None:\n\t\tdct = \t{ 'm2m:ae' : {\n\t\t\t\t\t'rn': \taeRN, \n\t\t\t\t\t'api': \t'NMyApp1Id',\n\t\t\t\t \t'rr': \tFalse,\n\t\t\t\t \t'srv': \t[ '3' ],\n\t\t\t\t \t'lbl':\t[ 'aLabel'],\n\t\t\t\t \t'at': \t[ REMOTECSEID ],\n\t\t\t\t \t'aa': \t[ 'lbl' ]\n\t\t\t\t}}\n\t\tr, rsc = CREATE(cseURL, 'C', T.AE, dct)\n\t\tself.assertEqual(rsc, RC.created)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:ae/lbl'))\n\t\tself.assertIsInstance(findXPath(r, 'm2m:ae/lbl'), list)\n\t\tself.assertEqual(len(findXPath(r, 'm2m:ae/lbl')), 1)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:ae/at'))\n\t\tself.assertIsInstance(findXPath(r, 'm2m:ae/at'), list)\n\t\tself.assertEqual(len(findXPath(r, 'm2m:ae/at')), 1)\n\t\tself.assertIsInstance(findXPath(r, 'm2m:ae/aa'), list)\n\t\tself.assertEqual(len(findXPath(r, 'm2m:ae/aa')), 1)\n\t\tself.assertIn('lbl', findXPath(r, 'm2m:ae/aa'))\n\t\tself.assertTrue(findXPath(r, 'm2m:ae/at')[0].startswith(f'{REMOTECSEID}/'))\n\t\tTestRemote_Annc.remoteAeRI = findXPath(r, 'm2m:ae/at')[0]\n\t\tself.assertIsNotNone(self.remoteAeRI)\n\t\tTestRemote_Annc.ae = r",
"def acr(self) -> Optional[pulumi.Input['ACRArgs']]:\n return pulumi.get(self, \"acr\")",
"def test_createAnnounceAEwithATwithoutAA(self) -> None:\n\t\tdct = \t{ 'm2m:ae' : {\n\t\t\t\t\t'rn': \taeRN, \n\t\t\t\t\t'api': \t'NMyApp1Id',\n\t\t\t\t \t'rr': \tFalse,\n\t\t\t\t \t'srv': \t[ '3' ],\n\t\t\t\t \t'at': \t[ REMOTECSEID ]\n\t\t\t\t}}\n\t\tr, rsc = CREATE(cseURL, 'C', T.AE, dct)\n\t\tself.assertEqual(rsc, RC.created)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:ae/at'))\n\t\tself.assertIsInstance(findXPath(r, 'm2m:ae/at'), list)\n\t\tself.assertEqual(len(findXPath(r, 'm2m:ae/at')), 1)\n\t\tself.assertTrue(findXPath(r, 'm2m:ae/at')[0].startswith(f'{REMOTECSEID}/'))\n\t\tTestRemote_Annc.remoteAeRI = findXPath(r, 'm2m:ae/at')[0]\n\t\tself.assertIsNotNone(self.remoteAeRI)\n\t\tself.assertIsNone(findXPath(r, 'm2m:ae/aa'))\n\t\tTestRemote_Annc.ae = r",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def immediate_processing_after_request(self, mc: ManagedCluster) -> None:\n # vnet\n need_grant_vnet_permission_to_cluster_identity = self.context.get_intermediate(\n \"need_post_creation_vnet_permission_granting\", default_value=False\n )\n if need_grant_vnet_permission_to_cluster_identity:\n # Grant vnet permission to system assigned identity RIGHT AFTER the cluster is put, this operation can\n # reduce latency for the role assignment take effect\n instant_cluster = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n if not self.context.external_functions.add_role_assignment(\n self.cmd,\n \"Network Contributor\",\n instant_cluster.identity.principal_id,\n scope=self.context.get_vnet_subnet_id(),\n is_service_principal=False,\n ):\n logger.warning(\n \"Could not create a role assignment for subnet. Are you an Owner on this subscription?\"\n )",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def configAdcAsic(self,Adcasic_regs):\n pass",
"def legacy_abac(self) -> 'outputs.LegacyAbacResponse':\n return pulumi.get(self, \"legacy_abac\")",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def configure_aaa_authorization_network(device, server_grp, server_grp_name):\n logger.info(f\"Configuring aaa authorization network group\")\n\n configs= f\"aaa authorization network {server_grp} group {server_grp_name}\"\n\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa authorization group methods. Error:\\n{e}\")",
"def update_account(self):\n self.on_ready.send(self)\n arns = self._get_arns()\n\n if not arns:\n self.current_app.logger.warn(\"Zero ARNs collected. Exiting\")\n exit(-1)\n\n client = self._get_client()\n try:\n details = self._call_access_advisor(client, list(arns))\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception('Failed to call access advisor', exc_info=True)\n return 255, None\n else:\n self.on_complete.send(self)\n return 0, details",
"def publish_endpoint_acls(self, endpoint_uuid, acls):\n log.info(\"Publishing ACL Update %s for %s\" % (acls, endpoint_uuid))\n update = {\"type\": \"ACLUPDATE\",\n \"issued\": time.time() * 1000,\n \"acls\": acls}\n self.pub_lock.acquire()\n self.pub_socket.send_multipart([endpoint_uuid.encode(\"utf-8\"),\n json.dumps(update).encode(\"utf-8\")])\n self.pub_lock.release()",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )"
] |
[
"0.70333576",
"0.6105166",
"0.52634287",
"0.49878967",
"0.4939688",
"0.49266833",
"0.48635554",
"0.47929135",
"0.47830227",
"0.469469",
"0.46504804",
"0.45948967",
"0.45948967",
"0.45925927",
"0.45897233",
"0.45892283",
"0.45535025",
"0.4539842",
"0.45309758",
"0.4507648",
"0.45009944",
"0.44665396",
"0.4441233",
"0.44244358",
"0.44169995",
"0.441366",
"0.44118673",
"0.4408624",
"0.44035488",
"0.4391144"
] |
0.6910099
|
1
|
Update SKU (uptime SLA) for the ManagedCluster object.
|
def update_sku(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
# Premium without LTS is ok (not vice versa)
if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:
mc.sku = self.models.ManagedClusterSKU(
name="Base",
tier="Premium"
)
if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:
mc.sku = self.models.ManagedClusterSKU(
name="Base",
tier="Standard"
)
if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:
mc.sku = self.models.ManagedClusterSKU(
name="Base",
tier="Free"
)
return mc
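
The tier-resolution rules above can be expressed as a small pure function, which makes the precedence of the flags easy to check in isolation. This is only a sketch: the lowercase tier strings mirror the CONST_MANAGED_CLUSTER_SKU_TIER_* constants referenced in the snippet, and treating uptime_sla/no_uptime_sla as plain booleans is an assumption for illustration (the real context object validates conflicting options elsewhere).

from typing import Optional

TIER_FREE, TIER_STANDARD, TIER_PREMIUM = "free", "standard", "premium"  # assumed constant values


def resolve_sku_tier(tier: Optional[str], uptime_sla: bool, no_uptime_sla: bool) -> Optional[str]:
    # Sequential checks, matching the snippet: a later match overwrites an earlier one.
    resolved = None
    if tier == TIER_PREMIUM:
        resolved = "Premium"
    if uptime_sla or tier == TIER_STANDARD:
        resolved = "Standard"
    if no_uptime_sla or tier == TIER_FREE:
        resolved = "Free"
    return resolved  # None means: leave mc.sku untouched


if __name__ == "__main__":
    assert resolve_sku_tier(TIER_PREMIUM, False, False) == "Premium"
    assert resolve_sku_tier(None, True, False) == "Standard"
    assert resolve_sku_tier(None, False, True) == "Free"
    assert resolve_sku_tier(None, False, False) is None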
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def sku(self, sku):\n\n self._sku = sku",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def version_recluster(self, serial, cluster,\n update_statistics_ancestors_depth=None):\n\n props = self.version_get_properties(serial)\n if not props:\n return\n node = props[NODE]\n size = props[SIZE]\n oldcluster = props[CLUSTER]\n if cluster == oldcluster:\n return\n\n mtime = time()\n self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster,\n update_statistics_ancestors_depth)\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n q = \"update versions set cluster = ? where serial = ?\"\n self.execute(q, (cluster, serial))",
"def sku(self):\n return self._sku",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def task_product_upshelf_update_productskusalestats(sku_id):\n from shopback.items.models import ProductSku, SkuStock, \\\n ProductSkuSaleStats, gen_productsksalestats_unikey\n sku = ProductSku.objects.get(id=sku_id)\n product_id = sku.product_id\n sku_stats = SkuStock.get_by_sku(sku_id)\n wait_assign_num = sku_stats.wait_assign_num\n\n stats_uni_key = gen_productsksalestats_unikey(sku_id)\n stats = ProductSkuSaleStats.objects.filter(uni_key=stats_uni_key, sku_id=sku_id)\n\n if stats.count() == 0:\n try:\n stat = ProductSkuSaleStats(uni_key=stats_uni_key,\n sku_id=sku_id,\n product_id=product_id,\n init_waitassign_num=wait_assign_num,\n sale_start_time=sku.product.upshelf_time,\n sale_end_time=sku.product.offshelf_time)\n stat.save()\n except IntegrityError as exc:\n logger.warn(\n \"IntegrityError - productskusalestat/init_waitassign_num | sku_id: %s, init_waitassign_num: %s\" % (\n sku_id, wait_assign_num))\n raise task_product_upshelf_update_productskusalestats.retry(exc=exc)\n else:\n logger.warn(\"RepeatUpshelf- productskusalestat/init_waitassign_num | sku_id: %s, init_waitassign_num: %s\" % (\n sku_id, wait_assign_num))",
"def cluster_uuid(self, cluster_uuid):\n\n self._cluster_uuid = cluster_uuid",
"def put(self, sku, page=None):\n put_data = api_parser.parse_args()\n product = Product.query.filter(Product.sku == put_data['sku']).first_or_404()\n product.name = put_data['name']\n product.description = put_data.get('description')\n product.is_active = put_data.get('is_active')\n db.session.add(product)\n db.session.commit()\n\n return marshal(product, product_fields), 200",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def test_cluster_downscale(self):\n logging.info(\"Adding units needed for downscaling test.\")\n self._add_unit(2)\n\n # Remove unit hosting at least one follower\n non_leader_unit = self._get_unit_hosting_ovn(leader=False)\n logging.info(\n \"Removing unit (%s) that hosts OVN follower server.\",\n non_leader_unit\n )\n\n non_leader_sb, non_leader_nb = self._get_server_ids(non_leader_unit)\n self._remove_unit(non_leader_unit)\n self._assert_servers_cleanly_removed(non_leader_sb, non_leader_nb)\n\n # Remove unit hosting at least one leader\n leader_unit = self._get_unit_hosting_ovn(leader=True)\n logging.info(\n \"Removing unit (%s) that hosts OVN leader server.\",\n leader_unit\n )\n\n leader_sb, leader_nb = self._get_server_ids(leader_unit)\n self._remove_unit(leader_unit)\n self._assert_servers_cleanly_removed(leader_sb, leader_nb)",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def sku(self) -> pulumi.Output['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)",
"def update_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc",
"def apply_maintenance_update(self):\n logger.info(\"Applying maintenance updates on master node\")\n self.env.admin_install_updates()\n\n logger.info(\"Applying maintenance updates on slaves\")\n slaves_mu_script_url = (\n \"https://github.com/Mirantis/tools-sustaining/\"\n \"raw/master/scripts/mos_apply_mu.py\")\n\n path_to_mu_script = \"/tmp/mos_apply_mu.py\"\n\n with self.env.d_env.get_admin_remote() as remote:\n remote.check_call(\"wget {uri} -O {path}\".format(\n uri=slaves_mu_script_url,\n path=path_to_mu_script)\n )\n\n remote.check_call(\n \"python {path} \"\n \"--env-id={identifier} \"\n \"--user={username} \"\n \"--pass={password} \"\n \"--tenant={tenant_name} --update\".format(\n path=path_to_mu_script,\n identifier=self.cluster_id,\n **conf.KEYSTONE_CREDS\n )\n )\n\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['controller', ])\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, roles=['compute', ])\n\n logger.info(\"Restarting all OpenStack services\")\n\n logger.info(\"Restarting services on controllers\")\n ha_services = (\n \"p_heat-engine\",\n \"p_neutron-plugin-openvswitch-agent\",\n \"p_neutron-dhcp-agent\",\n \"p_neutron-metadata-agent\",\n \"p_neutron-l3-agent\")\n non_ha_services = (\n \"heat-api-cloudwatch\",\n \"heat-api-cfn\",\n \"heat-api\",\n \"cinder-api\",\n \"cinder-scheduler\",\n \"nova-objectstore\",\n \"nova-cert\",\n \"nova-api\",\n \"nova-consoleauth\",\n \"nova-conductor\",\n \"nova-scheduler\",\n \"nova-novncproxy\",\n \"neutron-server\",\n )\n for controller in controllers:\n with self.fuel_web.get_ssh_for_nailgun_node(\n controller) as remote:\n for service in ha_services:\n remote_ops.manage_pacemaker_service(remote, service)\n for service in non_ha_services:\n remote_ops.manage_service(remote, service)\n\n logger.info(\"Restarting services on computes\")\n compute_services = (\n \"neutron-plugin-openvswitch-agent\",\n \"nova-compute\",\n )\n for compute in computes:\n with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:\n for service in compute_services:\n remote_ops.manage_service(remote, service)",
"def update_cluster(project_id, location, realm_id, cluster_id):\n\n client = gaming.GameServerClustersServiceClient()\n\n request = game_server_clusters.UpdateGameServerClusterRequest(\n game_server_cluster=game_server_clusters.GameServerCluster(\n name=f\"projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}\",\n labels={\"label-key-1\": \"label-value-1\", \"label-key-2\": \"label-value-2\"},\n ),\n update_mask=field_mask.FieldMask(paths=[\"labels\"]),\n )\n\n operation = client.update_game_server_cluster(request)\n print(f\"Update cluster operation: {operation.operation.name}\")\n operation.result(timeout=120)",
"def test_put_small_and_light_enrollment_by_seller_sku(self):\n pass",
"def task_product_downshelf_update_productskusalestats(sku_id, sale_end_time):\n from shopback.items.models import ProductSku, SkuStock, \\\n ProductSkuSaleStats, gen_productsksalestats_unikey\n\n product = ProductSku.objects.get(id=sku_id).product\n stats_uni_key = gen_productsksalestats_unikey(sku_id)\n stats = ProductSkuSaleStats.objects.filter(uni_key=stats_uni_key, sku_id=sku_id)\n\n if stats.count() > 0:\n try:\n stat = stats[0]\n if not stat.sale_end_time:\n stat.sale_end_time = stat.product.offshelf_time\n stat.status = ProductSkuSaleStats.ST_FINISH\n stat.save(update_fields=[\"sale_end_time\", \"status\"])\n except IntegrityError as exc:\n logger.warn(\"IntegrityError - productskusalestat/init_waitassign_num | sku_id: %s, sale_end_time: %s\" % (\n sku_id, sale_end_time))\n raise task_product_downshelf_update_productskusalestats.retry(exc=exc)\n else:\n logger.warn(\"RepeatDownshelf- productskusalestat/init_waitassign_num | sku_id: %s, sale_end_time: %s\" % (\n sku_id, sale_end_time))",
"def set_scaling_factor(c, v, data_objects=True, overwrite=True):\n if isinstance(c, (float, int)):\n # property packages can return 0 for material balance terms on components\n # doesn't exist. This handles the case where you get a constant 0 and\n # need its scale factor to scale the mass balance.\n return 1\n try:\n suf = c.parent_block().scaling_factor\n\n except AttributeError:\n c.parent_block().scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT)\n suf = c.parent_block().scaling_factor\n\n if not overwrite:\n try:\n tmp = suf[c] # pylint: disable=unused-variable\n # Able to access suffix value for c, so return without setting scaling factor\n return\n except KeyError:\n # No value exists yet for c, go ahead and set it\n pass\n suf[c] = v\n if data_objects and c.is_indexed():\n for cdat in c.values():\n if not overwrite:\n try:\n tmp = suf[cdat]\n continue\n except KeyError:\n pass\n suf[cdat] = v",
"def sku(self) -> pulumi.Input['LabVirtualMachineSkuArgs']:\n return pulumi.get(self, \"sku\")"
] |
[
"0.71179074",
"0.5984935",
"0.53230107",
"0.5303793",
"0.5204448",
"0.5096391",
"0.5087994",
"0.5086891",
"0.502062",
"0.49859262",
"0.4938367",
"0.49240932",
"0.48839968",
"0.48643538",
"0.48471776",
"0.48239538",
"0.47839066",
"0.47805685",
"0.47805685",
"0.4759781",
"0.47589308",
"0.47311994",
"0.4710352",
"0.4709261",
"0.46969",
"0.46773174",
"0.466876",
"0.4668144",
"0.46554798",
"0.4653241"
] |
0.7960321
|
0
|
Update outbound type of network profile for the ManagedCluster object.
|
def update_outbound_type_in_network_profile(self, mc: ManagedCluster) -> ManagedCluster:
    self._ensure_mc(mc)
    outbound_type = self.context.get_outbound_type()
    if outbound_type:
        vnet_subnet_id = self.context.get_vnet_subnet_id()
        if vnet_subnet_id is None and outbound_type not in [
            CONST_OUTBOUND_TYPE_LOAD_BALANCER,
            CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
            CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
        ]:
            raise InvalidArgumentValueError(
                "Invalid outbound type. Without a custom vnet subnet, supported values are "
                "loadBalancer, managedNATGateway and userDefinedRouting."
            )
        if vnet_subnet_id is not None and outbound_type not in [
            CONST_OUTBOUND_TYPE_LOAD_BALANCER,
            CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
            CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
        ]:
            raise InvalidArgumentValueError(
                "Invalid outbound type. With a custom vnet subnet, supported values are "
                "loadBalancer, userAssignedNATGateway and userDefinedRouting."
            )
        mc.network_profile.outbound_type = outbound_type
    return mc
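
The branch-dependent validation above can be summarized as a standalone check: without a custom VNet subnet the managed NAT gateway variant is accepted, while with one the user-assigned variant is. The sketch below is illustrative only; the literal strings mirror the CONST_OUTBOUND_TYPE_* constants referenced in the snippet, and their exact spellings here are assumptions.

from typing import Optional

LOAD_BALANCER = "loadBalancer"
MANAGED_NAT_GATEWAY = "managedNATGateway"
USER_ASSIGNED_NAT_GATEWAY = "userAssignedNATGateway"
USER_DEFINED_ROUTING = "userDefinedRouting"


def validate_outbound_type(outbound_type: Optional[str], vnet_subnet_id: Optional[str]) -> None:
    # No-op when no outbound type was requested, matching the snippet above.
    if not outbound_type:
        return
    if vnet_subnet_id is None:
        allowed = {LOAD_BALANCER, MANAGED_NAT_GATEWAY, USER_DEFINED_ROUTING}
    else:
        allowed = {LOAD_BALANCER, USER_ASSIGNED_NAT_GATEWAY, USER_DEFINED_ROUTING}
    if outbound_type not in allowed:
        raise ValueError(
            f"Invalid outbound type {outbound_type!r}; supported values here are {sorted(allowed)}"
        )


if __name__ == "__main__":
    validate_outbound_type("managedNATGateway", None)                       # ok without a custom subnet
    validate_outbound_type("userAssignedNATGateway", "custom-subnet-id")    # ok with a custom subnet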
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Unexpectedly get an empty network profile in the process of updating nat gateway profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:\n mc.network_profile.nat_gateway_profile = None\n else:\n mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n self.context.get_nat_gateway_idle_timeout(),\n mc.network_profile.nat_gateway_profile,\n models=self.models.nat_gateway_models,\n )\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def get_outbound_type(\n self,\n load_balancer_profile: ManagedClusterLoadBalancerProfile = None\n ) -> Union[str, None]:\n return self._get_outbound_type(\n enable_validation=True, load_balancer_profile=load_balancer_profile\n )",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def managed_outbound_ip_profile(self) -> Optional[pulumi.Input['ManagedClusterManagedOutboundIPProfileArgs']]:\n return pulumi.get(self, \"managed_outbound_ip_profile\")",
"def cluster_type(self, cluster_type):\n\n self._cluster_type = cluster_type",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_output_type_device_settings(): # pylint: disable=E0211\n data_in = request.get_json()\n data_out = copy.deepcopy(data_in)\n\n if not Executer.instance.device_settings_executer.validate_data_in(data_in, (\"device\", \"output_type_key\", \"settings\", )):\n return \"Input data are wrong.\", 403\n\n Executer.instance.device_settings_executer.set_output_type_device_setting(data_in[\"device\"], data_in[\"output_type_key\"], data_in[\"settings\"])\n\n return jsonify(data_out)",
"def update_zcs_settings(session, network, lowport, highport,\n return_type=None, **kwargs):\n verify_low_high_port(lowport, highport)\n\n body_values = {'network': network, 'lowport': lowport,\n 'highport': highport}\n\n path = '/api/settings/container_service.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def _output_update(self):\n self._outputtype = self.inputs.outputtype",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc"
] |
[
"0.59787434",
"0.5740378",
"0.5654729",
"0.56542915",
"0.5408262",
"0.5325254",
"0.53181076",
"0.5237229",
"0.5191053",
"0.5164368",
"0.5101257",
"0.50718784",
"0.5071083",
"0.4919225",
"0.49110246",
"0.48976603",
"0.48807123",
"0.48262355",
"0.47698545",
"0.46744728",
"0.4651114",
"0.46258062",
"0.4623607",
"0.46198168",
"0.45699197",
"0.4560235",
"0.4548376",
"0.4524793",
"0.45045942",
"0.44940946"
] |
0.820513
|
0
|
Update load balancer profile for the ManagedCluster object.
|
def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if not mc.network_profile:
raise UnknownError(
"Encounter an unexpected error while getting network profile from the cluster in the process of "
"updating its load balancer profile."
)
outbound_type = self.context.get_outbound_type()
if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:
mc.network_profile.load_balancer_profile = None
else:
load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()
load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()
load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()
load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()
load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()
load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()
        # The internal function "_update_load_balancer_profile" checks which of the provided parameters
        # have actually been assigned; only the corresponding fields of the profile are modified, and the
        # rest of the profile is left unchanged.
mc.network_profile.load_balancer_profile = _update_load_balancer_profile(
managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,
managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,
outbound_ips=load_balancer_outbound_ips,
outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,
outbound_ports=load_balancer_outbound_ports,
idle_timeout=load_balancer_idle_timeout,
profile=mc.network_profile.load_balancer_profile,
models=self.models.load_balancer_models)
return mc
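
The comment inside update_load_balancer_profile describes an assign-only-if-provided contract. Below is a minimal sketch of that contract; the class and function names (FakeLoadBalancerProfile, sketch_update_load_balancer_profile) and the field names are hypothetical stand-ins for illustration and do not reproduce the actual _update_load_balancer_profile helper or the SDK load balancer models.

# Illustrative sketch only: parameters that were actually supplied overwrite the
# matching profile fields; everything else on the existing profile is left untouched.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeLoadBalancerProfile:
    # Hypothetical stand-in for the SDK's ManagedClusterLoadBalancerProfile.
    managed_outbound_ip_count: Optional[int] = None
    outbound_ips: List[str] = field(default_factory=list)
    idle_timeout_in_minutes: Optional[int] = None


def sketch_update_load_balancer_profile(
    profile: FakeLoadBalancerProfile,
    managed_outbound_ip_count: Optional[int] = None,
    outbound_ips: Optional[List[str]] = None,
    idle_timeout: Optional[int] = None,
) -> FakeLoadBalancerProfile:
    # Overwrite a field only when the caller explicitly supplied a value.
    if managed_outbound_ip_count is not None:
        profile.managed_outbound_ip_count = managed_outbound_ip_count
    if outbound_ips is not None:
        profile.outbound_ips = outbound_ips
    if idle_timeout is not None:
        profile.idle_timeout_in_minutes = idle_timeout
    return profile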
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def pre_loadbalancer_member_update(self, resource_id, resource_dict):\n pass",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")",
"def update_loadbalancer(self, context, lb, old):\n LOG.debug(\"\\nupdate_loadbalancer({}): called\".format(lb.id))\n hostnames = self._get_hostname(lb)\n # Update the TrafficIP group\n vapv = self._get_vapv(hostnames)\n # Update allowed_address_pairs\n if not old or lb.vip_address != old.vip_address:\n for hostname in hostnames:\n port_ids = self.openstack_connector.get_server_port_ids(\n hostname\n )\n self.openstack_connector.add_ip_to_ports(\n lb.vip_address, port_ids\n )\n # Update bandwidth allocation\n if old is not None and old.bandwidth != lb.bandwidth:\n self._update_instance_bandwidth(hostnames, lb.bandwidth)",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_metadata(self, loadbalancer, metadata):\n return loadbalancer.update_metadata(metadata)",
"def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_loadbalancer(request, **kwargs):\n data = request.DATA\n loadbalancer_id = kwargs.get('loadbalancer_id')\n\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.update_load_balancer(\n loadbalancer_id,\n name=data['loadbalancer'].get('name'),\n description=data['loadbalancer'].get('description'),\n admin_state_up=data['loadbalancer'].get('admin_state_up'))\n\n return _get_sdk_object_dict(loadbalancer)",
"def post_loadbalancer_member_update(self, resource_id, resource_dict):\n pass",
"def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):\n old_val, new_val = self.get_diff_of_dict(\n old_loadbalancer, loadbalancer)\n arg_dict = {'context': context,\n lb_const.OLD_LOADBALANCER: old_loadbalancer,\n lb_const.LOADBALANCER: loadbalancer,\n }\n LOG.info(\"Received request 'Update Loadbalancer' for LB:%(lb)s \"\n \"with new Param:%(new_val)s and old Param:%(old_val)s\",\n {'lb': loadbalancer['id'],\n 'new_val': new_val,\n 'old_val': old_val})\n self._send_event(lb_const.EVENT_UPDATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])"
] |
[
"0.65942985",
"0.63839215",
"0.6343197",
"0.62864435",
"0.61292905",
"0.61204284",
"0.6031443",
"0.57443124",
"0.5726644",
"0.5723703",
"0.5628799",
"0.5628109",
"0.5623104",
"0.56101996",
"0.56002855",
"0.5590744",
"0.5588446",
"0.55560887",
"0.55289423",
"0.55235827",
"0.55140424",
"0.54861814",
"0.54826444",
"0.5442348",
"0.5390174",
"0.5388949",
"0.5349063",
"0.5316235",
"0.53040147",
"0.5261137"
] |
0.75255644
|
0
|
Update nat gateway profile for the ManagedCluster object.
|
def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if not mc.network_profile:
raise UnknownError(
"Unexpectedly get an empty network profile in the process of updating nat gateway profile."
)
outbound_type = self.context.get_outbound_type()
if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:
mc.network_profile.nat_gateway_profile = None
else:
mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile(
self.context.get_nat_gateway_managed_outbound_ip_count(),
self.context.get_nat_gateway_idle_timeout(),
mc.network_profile.nat_gateway_profile,
models=self.models.nat_gateway_models,
)
return mc
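
To illustrate the clearing branch of update_nat_gateway_profile, here is a small self-contained sketch built on stub objects; the stub classes and the literal outbound type strings are assumptions for demonstration and stand in for the real SDK ManagedCluster models and the decorator context.

# Illustrative stubs only; the real method operates on SDK ManagedCluster models.
class StubNetworkProfile:
    def __init__(self):
        # Pretend an existing managed NAT gateway profile is present.
        self.nat_gateway_profile = {"managed_outbound_ip_profile": {"count": 2}}


class StubManagedCluster:
    def __init__(self):
        self.network_profile = StubNetworkProfile()


def clear_nat_gateway_if_not_managed(mc, outbound_type):
    # Mirrors the control flow above: any outbound type other than the managed
    # NAT gateway one (assumed here to be the string "managedNATGateway")
    # drops the existing NAT gateway profile from the network profile.
    if outbound_type and outbound_type != "managedNATGateway":
        mc.network_profile.nat_gateway_profile = None
    return mc


mc = clear_nat_gateway_if_not_managed(StubManagedCluster(), "loadBalancer")
assert mc.network_profile.nat_gateway_profile is None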
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_nat(self, natgw, **attrs):\n return self._update(_gw.Service, natgw, **attrs)",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def nat_gateway_profile(self) -> Optional[pulumi.Input['ManagedClusterNATGatewayProfileArgs']]:\n return pulumi.get(self, \"nat_gateway_profile\")",
"def update(self, remote_gateway_info):\n\n self.touch()\n # remember, don't flag connection stats as worthy of a change.\n self.msg.conn_stats = remote_gateway_info.conn_stats\n # self.msg.last_connection_timestamp = rospy.Time.now() # do we really need this?\n\n # don't update every client, just the ones that we need information from\n important_state = (self.state == ConcertClient.State.AVAILABLE) or (self.state == ConcertClient.State.MISSING)\n is_changed = False\n return is_changed",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def do_update(self):\n params = self.inputs\n new_profile_id = params.get('new_profile_id', None)\n if new_profile_id and new_profile_id == self.entity.profile_id:\n params.pop('new_profile_id')\n\n if not params:\n return self.RES_OK, 'No property to update.'\n\n res = self.entity.do_update(self.context, params)\n if res:\n return self.RES_OK, 'Node updated successfully.'\n else:\n return self.RES_ERROR, 'Node update failed.'",
"def update(self) -> None:\n self._gateway.update()",
"def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass",
"def update(self, obj):\n\n self.cfg.update(obj)",
"def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):\n return self.host_profile.update(body, uri, api, headers)",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def update_outbound_type_in_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n outboundType = self.context.get_outbound_type()\n if outboundType:\n vnet_subnet_id = self.context.get_vnet_subnet_id()\n if vnet_subnet_id is None and outboundType not in [\n CONST_OUTBOUND_TYPE_LOAD_BALANCER,\n CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,\n CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING\n ]:\n raise InvalidArgumentValueError(\"Invalid outbound type, supported values are loadBalancer,\"\n \" managedNATGateway, userAssignedNATGateway and userDefinedRouting.\")\n if vnet_subnet_id is not None and outboundType not in [\n CONST_OUTBOUND_TYPE_LOAD_BALANCER,\n CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,\n CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING\n ]:\n raise InvalidArgumentValueError(\"Invalid outbound type, supported values are loadBalancer,\"\n \" managedNATGateway, userAssignedNATGateway and userDefinedRouting.\")\n mc.network_profile.outbound_type = outboundType\n return mc"
] |
[
"0.6426489",
"0.63450545",
"0.62319547",
"0.60947186",
"0.60823864",
"0.59361637",
"0.5891273",
"0.5854507",
"0.5761657",
"0.57524437",
"0.5678059",
"0.56612414",
"0.5579701",
"0.53702694",
"0.5347543",
"0.53397816",
"0.5326909",
"0.52681994",
"0.5237096",
"0.5205934",
"0.5189557",
"0.51673347",
"0.5137816",
"0.5116576",
"0.5092821",
"0.50831664",
"0.5056484",
"0.5051025",
"0.5033694",
"0.50081414"
] |
0.7545268
|
0
|
Update disable/enable local accounts for the ManagedCluster object.
|
def update_disable_local_accounts(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if self.context.get_disable_local_accounts():
mc.disable_local_accounts = True
if self.context.get_enable_local_accounts():
mc.disable_local_accounts = False
return mc
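The toggle above is easier to see in isolation. Below is a minimal, self-contained sketch (not the azure-cli implementation; FakeContext and FakeManagedCluster are hypothetical stand-ins) showing why the enable flag wins when both flags are passed: it is simply evaluated last.

# Hypothetical stand-ins that mimic the flag precedence of update_disable_local_accounts.
from dataclasses import dataclass

@dataclass
class FakeManagedCluster:
    disable_local_accounts: bool = False

@dataclass
class FakeContext:
    disable_local_accounts: bool = False
    enable_local_accounts: bool = False

def apply_local_accounts_flags(ctx: FakeContext, mc: FakeManagedCluster) -> FakeManagedCluster:
    # mirrors the decorator: disable is applied first, enable second, so enable wins if both are set
    if ctx.disable_local_accounts:
        mc.disable_local_accounts = True
    if ctx.enable_local_accounts:
        mc.disable_local_accounts = False
    return mc

if __name__ == "__main__":
    assert apply_local_accounts_flags(
        FakeContext(disable_local_accounts=True), FakeManagedCluster()
    ).disable_local_accounts is True
    assert apply_local_accounts_flags(
        FakeContext(disable_local_accounts=True, enable_local_accounts=True), FakeManagedCluster()
    ).disable_local_accounts is False  # enable is evaluated last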
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def local(self):\n logging.info(__name__ + ' : Set control to local & locked')\n self.set_remote_status(0)",
"def do_set_online(self):\n controller_name = self._tel.csp.controller\n controller = con_config.get_device_proxy(controller_name, fast_load=True)\n self._log(f\"Setting adminMode for {controller_name} to '0' (ONLINE)\")\n controller.write_attribute(\"adminmode\", 0)\n for index in range(1, self.nr_of_subarrays + 1):\n subarray_name = self._tel.csp.subarray(index)\n subarray = con_config.get_device_proxy(subarray_name, fast_load=True)\n self._log(f\"Setting adminMode for {subarray_name} to '0' (ONLINE)\")\n subarray.write_attribute(\"adminmode\", 0)",
"def undo_set_online(self):\n controller_name = self._tel.csp.controller\n controller = con_config.get_device_proxy(controller_name, fast_load=True)\n self._log(f\"Setting adminMode for {controller_name} to '1' (OFFLINE)\")\n controller.write_attribute(\"adminmode\", 1)\n for index in range(1, self.nr_of_subarrays + 1):\n subarray_name = self._tel.csp.subarray(index)\n subarray = con_config.get_device_proxy(subarray_name, fast_load=True)\n self._log(f\"Setting adminMode for {subarray_name} to '1' (OFFLINE)\")\n subarray.write_attribute(\"adminmode\", 1)",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = None\n if mc.addon_profiles is not None:\n monitoring_addon_enabled = (\n CONST_MONITORING_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled\n )\n ingress_appgw_addon_enabled = (\n CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled\n )\n virtual_node_addon_enabled = (\n CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and\n mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled\n )\n # set intermediates, used later to ensure role assignments\n self.context.set_intermediate(\n \"monitoring_addon_enabled\", monitoring_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"ingress_appgw_addon_enabled\", ingress_appgw_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"virtual_node_addon_enabled\", virtual_node_addon_enabled, overwrite_exists=True\n )\n # get azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n )\n\n # update azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = (\n self.update_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n if azure_keyvault_secrets_provider_addon_profile:\n # mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None\n mc.addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = azure_keyvault_secrets_provider_addon_profile\n return mc",
"def update_azure_keyvault_kms(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_azure_keyvault_kms():\n # get kms profile\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n azure_key_vault_kms_profile = mc.security_profile.azure_key_vault_kms\n if azure_key_vault_kms_profile is None:\n azure_key_vault_kms_profile = self.models.AzureKeyVaultKms()\n mc.security_profile.azure_key_vault_kms = azure_key_vault_kms_profile\n\n # set enabled\n azure_key_vault_kms_profile.enabled = True\n # set key id\n azure_key_vault_kms_profile.key_id = self.context.get_azure_keyvault_kms_key_id()\n # set network access, should never be None for now, can be safely assigned, temp fix for rp\n # the value is obtained from user input or backfilled from existing mc or to default value\n azure_key_vault_kms_profile.key_vault_network_access = (\n self.context.get_azure_keyvault_kms_key_vault_network_access()\n )\n # set key vault resource id\n if azure_key_vault_kms_profile.key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:\n azure_key_vault_kms_profile.key_vault_resource_id = (\n self.context.get_azure_keyvault_kms_key_vault_resource_id()\n )\n else:\n azure_key_vault_kms_profile.key_vault_resource_id = \"\"\n\n if self.context.get_disable_azure_keyvault_kms():\n # get kms profile\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n azure_key_vault_kms_profile = mc.security_profile.azure_key_vault_kms\n if azure_key_vault_kms_profile is None:\n azure_key_vault_kms_profile = self.models.AzureKeyVaultKms()\n mc.security_profile.azure_key_vault_kms = azure_key_vault_kms_profile\n\n # set enabled to False\n azure_key_vault_kms_profile.enabled = False\n\n return mc",
"def set_chassis_cluster_disable(self):\n return self.dev.rpc.set_chassis_cluster_disable(\n reboot=True, normalize=True)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_chassis_cluster_enable(self, cluster_id, node_id):\n return self.dev.rpc.set_chassis_cluster_enable(\n cluster_id=cluster_id, node=node_id,\n reboot=True, normalize=True)",
"def remote(self):\n logging.info(__name__ + ' : Set control to remote & locked')\n self.set_remote_status(1)",
"def accountcontrol(lp, creds, username=None, value=0):\n\n names = guess_names_from_smbconf(lp, None, None)\n db = Ldb(url=get_ldb_url(lp, creds, names), session_info=system_session(), \n credentials=creds, lp=lp)\n user_dn = get_user_dn(db, \"CN=Users,%s\" % names.domaindn, username)\n extended_user = \"\"\"\ndn: %s\nchangetype: modify\nreplace: msExchUserAccountControl\nmsExchUserAccountControl: %d\n\"\"\" % (user_dn, value)\n db.modify_ldif(extended_user)\n if value == 2:\n print \"[+] Account %s disabled\" % username\n else:\n print \"[+] Account %s enabled\" % username",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def cluster_resource_whitelist(self, cluster_resource_whitelist):\n\n self._cluster_resource_whitelist = cluster_resource_whitelist",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def set_into_managed_mode(wifi_name):\n \n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"iwconfig\", wifi_name, \"mode\", \"managed\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])\n subprocess.run([\"service\", \"NetworkManager\", \"start\"])",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)",
"def maintainance(self, on_off, instance_type):\n print((\"enabling\" if on_off else \"disabling\") + \" Maintainer mode\")\n tries = 60\n while True:\n reply = self.send_request(\n instance_type,\n requests.put,\n \"/_admin/cluster/maintenance\",\n '\"on\"' if on_off else '\"off\"',\n )\n if len(reply) > 0:\n print(\"Reply: \" + str(reply[0].text))\n if reply[0].status_code == 200:\n return\n print(f\"Reply status code is {reply[0].status_code}. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n else:\n print(\"Reply is empty. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n if tries <= 0:\n action = \"enable\" if on_off else \"disable\"\n raise Exception(f\"Couldn't {action} maintainance mode!\")",
"def UpdateFromServer(self):\n self.status = GetUserStatus(self.accesskey)",
"def set_remote_status(self, mode):\n status = {\n 0: \"Local and locked\",\n 1: \"Remote and locked\",\n 2: \"Local and unlocked\",\n 3: \"Remote and unlocked\",\n }\n logging.info(__name__ + ' : Setting remote control status to %s' % status.get(mode, \"Unknown\"))\n self._execute('C%s' % mode)",
"def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )"
] |
[
"0.5662341",
"0.5546752",
"0.54727477",
"0.53884345",
"0.5381607",
"0.5305259",
"0.5174011",
"0.51711553",
"0.51159185",
"0.5115369",
"0.5103545",
"0.5077516",
"0.50707024",
"0.50563246",
"0.4988788",
"0.49565467",
"0.49428022",
"0.4870084",
"0.4857425",
"0.48238754",
"0.48203778",
"0.48190278",
"0.48054627",
"0.47940138",
"0.4790904",
"0.47639573",
"0.4754045",
"0.47491997",
"0.46958798",
"0.46709287"
] |
0.7822991
|
0
|
Update api server access profile and fqdn subdomain for the ManagedCluster object.
|
def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if mc.api_server_access_profile is None:
profile_holder = self.models.ManagedClusterAPIServerAccessProfile()
else:
profile_holder = mc.api_server_access_profile
api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()
disable_public_fqdn = self.context.get_disable_public_fqdn()
enable_public_fqdn = self.context.get_enable_public_fqdn()
if api_server_authorized_ip_ranges is not None:
# empty string is valid as it disables ip whitelisting
profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges
if disable_public_fqdn:
profile_holder.enable_private_cluster_public_fqdn = False
if enable_public_fqdn:
profile_holder.enable_private_cluster_public_fqdn = True
# keep api_server_access_profile empty if none of its properties are updated
if (
profile_holder != mc.api_server_access_profile and
profile_holder == self.models.ManagedClusterAPIServerAccessProfile()
):
profile_holder = None
mc.api_server_access_profile = profile_holder
return mc
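The subtle part of the method above is the final comparison: the holder is reset to None only when it differs from what the cluster already had and still equals a freshly constructed, empty profile. A small sketch of that pattern, assuming a hypothetical ApiServerAccessProfile dataclass in place of the real SDK model:

# Illustration only: dataclass equality stands in for the SDK model comparison.
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class ApiServerAccessProfile:
    authorized_ip_ranges: Optional[List[str]] = None
    enable_private_cluster_public_fqdn: Optional[bool] = None

def normalize_profile(existing: Optional[ApiServerAccessProfile],
                      updated: ApiServerAccessProfile) -> Optional[ApiServerAccessProfile]:
    # drop the profile when the cluster had none and no property was actually set
    if updated != existing and updated == ApiServerAccessProfile():
        return None
    return updated

if __name__ == "__main__":
    # cluster had no profile and nothing was updated -> stays empty (None)
    assert normalize_profile(None, ApiServerAccessProfile()) is None
    # cluster had no profile but an IP range was supplied -> profile is kept
    assert normalize_profile(None, ApiServerAccessProfile(authorized_ip_ranges=["10.0.0.0/16"])) is not None

Because an existing profile object is mutated in place, the inequality check fails in that branch, so a pre-existing profile is never dropped; only a freshly created, untouched holder is discarded.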
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n api_server_access_profile = None\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n enable_private_cluster = self.context.get_enable_private_cluster()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n private_dns_zone = self.context.get_private_dns_zone()\n if api_server_authorized_ip_ranges or enable_private_cluster:\n api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(\n authorized_ip_ranges=api_server_authorized_ip_ranges,\n enable_private_cluster=True if enable_private_cluster else None,\n enable_private_cluster_public_fqdn=False if disable_public_fqdn else None,\n private_dns_zone=private_dns_zone\n )\n mc.api_server_access_profile = api_server_access_profile\n\n fqdn_subdomain = self.context.get_fqdn_subdomain()\n mc.fqdn_subdomain = fqdn_subdomain\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def fusion_api_edit_datacenter(self, body, uri, api=None, headers=None):\n return self.dc.update(body, uri, api, headers)",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def fusion_api_update_server_certificate(self, aliasname, body, api=None, headers=None):\n return self.server_certificate.put(aliasname, body, api, headers)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):\n return self.host_profile.update(body, uri, api, headers)",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def UpdateFromServer(self):\n self.status = GetUserStatus(self.accesskey)",
"def fusion_api_patch_server_profile(self, body, uri, api=None, headers=None):\n return self.profile.patch(body, uri, api, headers)",
"def update_server(finished, server):\n name = server['name']\n while not finished.wait(2):\n new_s = fetch_server(VAULTZ[name], server)\n if 'cluster_id' in new_s:\n my_cluster = [x['name']\n for _name, x\n in iteritems(SERVERZ)\n if x.get('cluster_id', None) == new_s['cluster_id']]\n new_s['cluster_members'] = my_cluster\n\n SERVERZ[name] = new_s",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def ModifyRealServerName(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyRealServerName\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyRealServerNameResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def _set_nameserver(self, instance):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--nameserver', network['dns'])\n if err:\n LOG.error(err)\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Unable to set nameserver for %s' %\n instance['id'])",
"def test_update_virt_realm_remote_access_config(self):\n pass",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def handle_cluster_config(self, request):\n \"\"\"\n @api {get} /cluster/config/:key Get cluster parameter\n @apiName GetClusterConfig\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiParam {string} :key Name of the parameter to get\n \"\"\"\n \"\"\"\n @api {put} /cluster/config/:key Set cluster parameter\n @apiName SetClusterConfig\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiParam {string} :key Name of the parameter to set\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n match = re.match('/cluster/config/(.+)', request.uri_path)\n name = match.group(1)\n\n if request.method == \"GET\":\n try:\n return HTTPReply(body = json.dumps(self.cluster.config.get(name)), headers = headers)\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"PUT\":\n try:\n self.cluster.config.set(name, json.loads(request.body))\n return HTTPReply(code = 204, headers = {'Access-Control-Allow-Origin': '*'})\n except (ValueError, TypeError) as error:\n return HTTPReply(code = 400, message = str(error), headers = {'Access-Control-Allow-Origin': '*'})\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"DELETE\":\n try:\n self.cluster.config.clear(name)\n return HTTPReply(code = 204, headers = {'Access-Control-Allow-Origin': '*'})\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def update(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'PUT', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack server: %s' % truncate(res))\n return res[0]['server']",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def renameCluster(**kwargs):\n vmc_url = kwargs['strProdURL']\n org_id = kwargs['ORG_ID']\n sddc_id = kwargs['SDDC_ID']\n session_token = kwargs['sessiontoken']\n old_cluster_name = kwargs['cluster_name']\n new_cluster_name = kwargs['new_name']\n\n sddc_json = get_sddc_info_json(vmc_url, org_id, session_token, sddc_id)\n cluster_json = sddc_json['resource_config']['clusters']\n sddc_name = sddc_json['name']\n\n old_cluster_names = []\n for n in cluster_json:\n old_cluster_names.append(n['cluster_name'])\n \n if old_cluster_name in old_cluster_names:\n for c in cluster_json:\n if old_cluster_name == c['cluster_name']:\n cid = c['cluster_id']\n json_data = {\n 'cluster_name': new_cluster_name\n }\n response = post_cluster_rename_json(vmc_url, session_token, org_id, cid, json_data)\n if response == 202:\n print(f'Cluster {old_cluster_name} has been renamed to {new_cluster_name}')\n else:\n print(f'There was a problem with the cluster rename operation')\n else:\n pass\n else:\n sys.exit(f'{old_cluster_name} does not exist in {sddc_name}')",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def set_server_metadata(self, name, **metadata):\n # VERBOSE(metadata)\n # leght of metadata is resricted, so e limit\n data = {}\n if metadata is not None and isinstance(metadata, dict) and 'cm' in metadata:\n if isinstance(metadata['cm'], str):\n import json\n data.update(json.loads(metadata['cm'].replace('\\'', '\\\"')))\n else:\n data.update(metadata['cm'])\n\n _data = {}\n for key in data.keys():\n _data[f\"cm_{key}\"] = data[key]\n # VERBOSE(_data)\n\n server = self.cloudman.get_server(name)\n\n self.cloudman.set_server_metadata(server, _data)",
"def fusion_api_update_managed_san(self, body, uri, api=None, headers=None):\n return self.ms.update(body, uri, api, headers)",
"def test_update_hyperflex_cluster(self):\n pass"
] |
[
"0.6785518",
"0.62549794",
"0.56788695",
"0.5587446",
"0.55768114",
"0.5548693",
"0.5440958",
"0.523388",
"0.5209735",
"0.5204637",
"0.5203941",
"0.5169459",
"0.5154716",
"0.51498365",
"0.5145779",
"0.50799966",
"0.50749654",
"0.50468695",
"0.5043714",
"0.50179535",
"0.5007153",
"0.49524045",
"0.49445245",
"0.49281886",
"0.49275845",
"0.4917611",
"0.49050158",
"0.48984",
"0.48867577",
"0.48681036"
] |
0.72265285
|
0
|
Update windows profile for the ManagedCluster object.
|
def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
enable_ahub = self.context.get_enable_ahub()
disable_ahub = self.context.get_disable_ahub()
windows_admin_password = self.context.get_windows_admin_password()
enable_windows_gmsa = self.context.get_enable_windows_gmsa()
if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:
        # a Windows-specific option was provided, but the cluster has no windows profile to update
raise UnknownError(
"Encounter an unexpected error while getting windows profile from the cluster in the process of update."
)
if enable_ahub:
mc.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
mc.windows_profile.license_type = 'None'
if windows_admin_password:
mc.windows_profile.admin_password = windows_admin_password
if enable_windows_gmsa:
gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()
mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(
enabled=True,
dns_server=gmsa_dns_server,
root_domain_name=gmsa_root_domain_name,
)
return mc
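The method above first guards against setting any Windows option on a cluster that has no windows profile, then applies each flag independently. A compact illustration of that guard and the AHUB license toggle, using hypothetical stand-in types rather than the SDK models:

# Hypothetical stand-ins; ValueError replaces the CLI's UnknownError to keep the sketch self-contained.
from dataclasses import dataclass
from typing import Optional

@dataclass
class WindowsProfile:
    license_type: Optional[str] = None
    admin_password: Optional[str] = None

def apply_windows_flags(windows_profile: Optional[WindowsProfile],
                        enable_ahub: bool = False,
                        disable_ahub: bool = False,
                        admin_password: Optional[str] = None) -> Optional[WindowsProfile]:
    # any Windows-related flag on a cluster without a windows profile is an error
    if any([enable_ahub, disable_ahub, admin_password]) and windows_profile is None:
        raise ValueError("the cluster has no windows profile to update")
    if enable_ahub:
        windows_profile.license_type = "Windows_Server"
    if disable_ahub:
        windows_profile.license_type = "None"
    if admin_password:
        windows_profile.admin_password = admin_password
    return windows_profile

if __name__ == "__main__":
    assert apply_windows_flags(WindowsProfile(), enable_ahub=True).license_type == "Windows_Server"
    try:
        apply_windows_flags(None, disable_ahub=True)
    except ValueError:
        pass  # expected: no windows profile on the cluster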
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n (\n windows_admin_username,\n windows_admin_password,\n ) = self.context.get_windows_admin_username_and_password()\n if windows_admin_username or windows_admin_password:\n # license\n windows_license_type = None\n if self.context.get_enable_ahub():\n windows_license_type = \"Windows_Server\"\n\n # gmsa\n gmsa_profile = None\n if self.context.get_enable_windows_gmsa():\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n\n # this would throw an error if windows_admin_username is empty (the user enters an empty\n # string after being prompted), since admin_username is a required parameter\n windows_profile = self.models.ManagedClusterWindowsProfile(\n # [SuppressMessage(\"Microsoft.Security\", \"CS002:SecretInNextLine\", Justification=\"variable name\")]\n admin_username=windows_admin_username,\n admin_password=windows_admin_password,\n license_type=windows_license_type,\n gmsa_profile=gmsa_profile,\n )\n\n mc.windows_profile = windows_profile\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = None\n if mc.addon_profiles is not None:\n monitoring_addon_enabled = (\n CONST_MONITORING_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled\n )\n ingress_appgw_addon_enabled = (\n CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled\n )\n virtual_node_addon_enabled = (\n CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and\n mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled\n )\n # set intermediates, used later to ensure role assignments\n self.context.set_intermediate(\n \"monitoring_addon_enabled\", monitoring_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"ingress_appgw_addon_enabled\", ingress_appgw_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"virtual_node_addon_enabled\", virtual_node_addon_enabled, overwrite_exists=True\n )\n # get azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n )\n\n # update azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = (\n self.update_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n if azure_keyvault_secrets_provider_addon_profile:\n # mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None\n mc.addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = azure_keyvault_secrets_provider_addon_profile\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Unexpectedly get an empty network profile in the process of updating nat gateway profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:\n mc.network_profile.nat_gateway_profile = None\n else:\n mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n self.context.get_nat_gateway_idle_timeout(),\n mc.network_profile.nat_gateway_profile,\n models=self.models.nat_gateway_models,\n )\n return mc",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_profile(self, channels=None): # pragma: no cover\n pass",
"def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n # set intermediate\n self.context.set_intermediate(\"azuremonitormetrics_addon_enabled\", True, overwrite_exists=True)\n return mc",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()\n if not no_ssh_key:\n ssh_config = self.models.ContainerServiceSshConfiguration(\n public_keys=[\n self.models.ContainerServiceSshPublicKey(\n key_data=ssh_key_value\n )\n ]\n )\n linux_profile = self.models.ContainerServiceLinuxProfile(\n admin_username=self.context.get_admin_username(), ssh=ssh_config\n )\n mc.linux_profile = linux_profile\n return mc",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc"
] |
[
"0.6615214",
"0.6559488",
"0.6554667",
"0.6302969",
"0.62154424",
"0.6039585",
"0.60123307",
"0.5999623",
"0.5888613",
"0.58799297",
"0.5857495",
"0.57862186",
"0.575272",
"0.55814606",
"0.5574849",
"0.5490474",
"0.54602957",
"0.5456462",
"0.54487735",
"0.5445578",
"0.54345506",
"0.54007846",
"0.5381682",
"0.5375727",
"0.5255722",
"0.52309763",
"0.5216745",
"0.51721245",
"0.5171327",
"0.51453084"
] |
0.79302394
|
0
|
Update aad profile for the ManagedCluster object.
|
def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if self.context.get_enable_aad():
mc.aad_profile = self.models.ManagedClusterAADProfile(
managed=True
)
aad_tenant_id = self.context.get_aad_tenant_id()
aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()
enable_azure_rbac = self.context.get_enable_azure_rbac()
disable_azure_rbac = self.context.get_disable_azure_rbac()
if aad_tenant_id is not None:
mc.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
# ids -> i_ds due to track 2 naming issue
mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids
if enable_azure_rbac:
mc.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
mc.aad_profile.enable_azure_rbac = False
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"async def async_update_avm_device() -> None:\n async_add_entities(await _async_profile_entities_list(avm_wrapper, data_fritz))",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def add_to_cluster_dictionary(self, cluster):\n\n self.clusters[self.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n\n return",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )"
] |
[
"0.65236986",
"0.64542335",
"0.6392261",
"0.6273808",
"0.62069446",
"0.62045646",
"0.6041641",
"0.58790123",
"0.5821626",
"0.5816845",
"0.57900316",
"0.57251436",
"0.57209194",
"0.5489699",
"0.54597247",
"0.54454964",
"0.54235744",
"0.5406756",
"0.5329346",
"0.5309974",
"0.5303684",
"0.5256076",
"0.5251642",
"0.524842",
"0.5173333",
"0.5160467",
"0.5145575",
"0.5136312",
"0.513476",
"0.5110386"
] |
0.7686997
|
0
|
Update OIDC issuer profile for the ManagedCluster object.
|
def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
oidc_issuer_profile = self.context.get_oidc_issuer_profile()
if oidc_issuer_profile is not None:
mc.oidc_issuer_profile = oidc_issuer_profile
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def get_oidc_issuer_profile(self) -> ManagedClusterOIDCIssuerProfile:\n enable_flag_value = bool(self.raw_param.get(\"enable_oidc_issuer\"))\n if not enable_flag_value:\n # enable flag not set, return a None profile, server side will backfill the default/existing value\n return None\n\n profile = self.models.ManagedClusterOIDCIssuerProfile()\n if self.decorator_mode == DecoratorMode.UPDATE:\n if self.mc.oidc_issuer_profile is not None:\n profile = self.mc.oidc_issuer_profile\n profile.enabled = True\n\n return profile",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def set_issuer(self, claim=ISSUER):\n if api_settings.ISSUER is not None:\n self.payload[claim] = api_settings.ISSUER",
"def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n api_server_access_profile = None\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n enable_private_cluster = self.context.get_enable_private_cluster()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n private_dns_zone = self.context.get_private_dns_zone()\n if api_server_authorized_ip_ranges or enable_private_cluster:\n api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(\n authorized_ip_ranges=api_server_authorized_ip_ranges,\n enable_private_cluster=True if enable_private_cluster else None,\n enable_private_cluster_public_fqdn=False if disable_public_fqdn else None,\n private_dns_zone=private_dns_zone\n )\n mc.api_server_access_profile = api_server_access_profile\n\n fqdn_subdomain = self.context.get_fqdn_subdomain()\n mc.fqdn_subdomain = fqdn_subdomain\n return mc",
"async def update_issuer(self, issuer_name: str, **kwargs) -> CertificateIssuer:\n\n enabled = kwargs.pop(\"enabled\", None)\n account_id = kwargs.pop(\"account_id\", None)\n password = kwargs.pop(\"password\", None)\n organization_id = kwargs.pop(\"organization_id\", None)\n admin_contacts = kwargs.pop(\"admin_contacts\", None)\n\n if account_id or password:\n issuer_credentials = self._models.IssuerCredentials(account_id=account_id, password=password)\n else:\n issuer_credentials = None\n if admin_contacts:\n admin_details: Optional[List[Any]] = list(\n self._models.AdministratorDetails(\n first_name=contact.first_name,\n last_name=contact.last_name,\n email_address=contact.email,\n phone=contact.phone,\n )\n for contact in admin_contacts\n )\n else:\n admin_details = None\n if organization_id or admin_details:\n organization_details = self._models.OrganizationDetails(id=organization_id, admin_details=admin_details)\n else:\n organization_details = None\n if enabled is not None:\n issuer_attributes = self._models.IssuerAttributes(enabled=enabled)\n else:\n issuer_attributes = None\n\n parameters = self._models.CertificateIssuerUpdateParameters(\n provider=kwargs.pop(\"provider\", None),\n credentials=issuer_credentials,\n organization_details=organization_details,\n attributes=issuer_attributes,\n )\n\n issuer_bundle = await self._client.update_certificate_issuer(\n vault_base_url=self.vault_url, issuer_name=issuer_name, parameter=parameters, **kwargs\n )\n return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def aks_cluster_profile(self) -> 'outputs.ClusterPoolResourcePropertiesResponseAksClusterProfile':\n return pulumi.get(self, \"aks_cluster_profile\")",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc"
] |
[
"0.7138633",
"0.6434838",
"0.6359209",
"0.6091803",
"0.5943593",
"0.5936524",
"0.5852094",
"0.5840535",
"0.58331877",
"0.56902266",
"0.56145376",
"0.55521697",
"0.5457869",
"0.53420657",
"0.5270344",
"0.52316934",
"0.51749843",
"0.5146125",
"0.5134343",
"0.5117193",
"0.5093054",
"0.50750816",
"0.5051044",
"0.5011815",
"0.5000755",
"0.49824372",
"0.49822316",
"0.4948873",
"0.49294832",
"0.49238795"
] |
0.79235494
|
0
|
Update auto upgrade profile for the ManagedCluster object.
|
def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:
        self._ensure_mc(mc)

        auto_upgrade_channel = self.context.get_auto_upgrade_channel()
        if auto_upgrade_channel is not None:
            if mc.auto_upgrade_profile is None:
                mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()
            mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel

        node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()
        if node_os_upgrade_channel is not None:
            if mc.auto_upgrade_profile is None:
                mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()
            mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel
        return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = None\n if mc.addon_profiles is not None:\n monitoring_addon_enabled = (\n CONST_MONITORING_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled\n )\n ingress_appgw_addon_enabled = (\n CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled\n )\n virtual_node_addon_enabled = (\n CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and\n mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled\n )\n # set intermediates, used later to ensure role assignments\n self.context.set_intermediate(\n \"monitoring_addon_enabled\", monitoring_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"ingress_appgw_addon_enabled\", ingress_appgw_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"virtual_node_addon_enabled\", virtual_node_addon_enabled, overwrite_exists=True\n )\n # get azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n )\n\n # update azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = (\n self.update_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n if azure_keyvault_secrets_provider_addon_profile:\n # mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None\n mc.addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = azure_keyvault_secrets_provider_addon_profile\n return mc",
"def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(enabled=True)\n else:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def update_image_cleaner(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_image_cleaner = self.context.get_enable_image_cleaner()\n disable_image_cleaner = self.context.get_disable_image_cleaner()\n interval_hours = self.context.get_image_cleaner_interval_hours()\n\n # no image cleaner related changes\n if not enable_image_cleaner and not disable_image_cleaner and interval_hours is None:\n return mc\n\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n image_cleaner_profile = mc.security_profile.image_cleaner\n\n if image_cleaner_profile is None:\n image_cleaner_profile = self.models.ManagedClusterSecurityProfileImageCleaner()\n mc.security_profile.image_cleaner = image_cleaner_profile\n\n # init the image cleaner profile\n image_cleaner_profile.enabled = False\n image_cleaner_profile.interval_hours = 7 * 24\n\n if enable_image_cleaner:\n image_cleaner_profile.enabled = True\n\n if disable_image_cleaner:\n image_cleaner_profile.enabled = False\n\n if interval_hours is not None:\n image_cleaner_profile.interval_hours = interval_hours\n\n return mc",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Unexpectedly get an empty network profile in the process of updating nat gateway profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:\n mc.network_profile.nat_gateway_profile = None\n else:\n mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n self.context.get_nat_gateway_idle_timeout(),\n mc.network_profile.nat_gateway_profile,\n models=self.models.nat_gateway_models,\n )\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError"
] |
[
"0.7363615",
"0.7225819",
"0.70861536",
"0.6530185",
"0.6529984",
"0.65046024",
"0.6463127",
"0.6392512",
"0.6330424",
"0.6251988",
"0.60825664",
"0.604037",
"0.5928965",
"0.591562",
"0.5909689",
"0.5905758",
"0.5892767",
"0.5789834",
"0.57436556",
"0.5720232",
"0.5621832",
"0.56196755",
"0.5581432",
"0.5535055",
"0.54561985",
"0.53506047",
"0.533182",
"0.53265095",
"0.53245246",
"0.53245133"
] |
0.7936828
|
0
|
Update network plugin settings of network profile for the ManagedCluster object.
|
def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:
        self._ensure_mc(mc)

        network_plugin_mode = self.context.get_network_plugin_mode()
        if network_plugin_mode:
            mc.network_profile.network_plugin_mode = network_plugin_mode

        (
            pod_cidr,
            _,
            _,
            _,
            _
        ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()

        network_dataplane = self.context.get_network_dataplane()
        if network_dataplane:
            mc.network_profile.network_dataplane = network_dataplane

        if pod_cidr:
            mc.network_profile.pod_cidr = pod_cidr
        return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Unexpectedly get an empty network profile in the process of updating nat gateway profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY:\n mc.network_profile.nat_gateway_profile = None\n else:\n mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n self.context.get_nat_gateway_idle_timeout(),\n mc.network_profile.nat_gateway_profile,\n models=self.models.nat_gateway_models,\n )\n return mc",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def update_zcs_settings(session, network, lowport, highport,\n return_type=None, **kwargs):\n verify_low_high_port(lowport, highport)\n\n body_values = {'network': network, 'lowport': lowport,\n 'highport': highport}\n\n path = '/api/settings/container_service.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net",
"def update_network_profile(arn=None, name=None, description=None, type=None, uplinkBandwidthBits=None, downlinkBandwidthBits=None, uplinkDelayMs=None, downlinkDelayMs=None, uplinkJitterMs=None, downlinkJitterMs=None, uplinkLossPercent=None, downlinkLossPercent=None):\n pass",
"def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # build load balancer profile, which is part of the network profile\n load_balancer_profile = create_load_balancer_profile(\n self.context.get_load_balancer_managed_outbound_ip_count(),\n self.context.get_load_balancer_managed_outbound_ipv6_count(),\n self.context.get_load_balancer_outbound_ips(),\n self.context.get_load_balancer_outbound_ip_prefixes(),\n self.context.get_load_balancer_outbound_ports(),\n self.context.get_load_balancer_idle_timeout(),\n models=self.models.load_balancer_models,\n )\n\n # verify outbound type\n # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is\n # dynamically completed.\n outbound_type = self.context.get_outbound_type(\n load_balancer_profile=load_balancer_profile\n )\n\n # verify load balancer sku\n load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())\n\n # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy\n network_plugin = self.context.get_network_plugin()\n network_plugin_mode = self.context.get_network_plugin_mode()\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = (\n self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n )\n network_profile = None\n # set up pod_cidrs, service_cidrs and ip_families\n (\n pod_cidrs,\n service_cidrs,\n ip_families\n ) = (\n self.context.get_pod_cidrs_and_service_cidrs_and_ip_families()\n )\n\n network_dataplane = self.context.get_network_dataplane()\n\n if any(\n [\n network_plugin,\n network_plugin_mode,\n pod_cidr,\n pod_cidrs,\n service_cidr,\n service_cidrs,\n ip_families,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n network_dataplane,\n ]\n ):\n # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error\n # if load_balancer_sku is set to basic and load_balancer_profile is assigned.\n # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr\n # and outbound_type, and they might be overwritten to None.\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=network_plugin,\n network_plugin_mode=network_plugin_mode,\n pod_cidr=pod_cidr,\n pod_cidrs=pod_cidrs,\n service_cidr=service_cidr,\n service_cidrs=service_cidrs,\n ip_families=ip_families,\n dns_service_ip=dns_service_ip,\n docker_bridge_cidr=docker_bridge_address,\n network_policy=network_policy,\n network_dataplane=network_dataplane,\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n else:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_STANDARD or load_balancer_profile:\n network_profile = self.models.ContainerServiceNetworkProfile(\n network_plugin=\"kubenet\",\n load_balancer_sku=load_balancer_sku,\n load_balancer_profile=load_balancer_profile,\n outbound_type=outbound_type,\n )\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n # load balancer sku must be standard when load balancer profile is provided\n network_profile = self.models.ContainerServiceNetworkProfile(\n load_balancer_sku=load_balancer_sku,\n )\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n 
self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != CONST_LOAD_BALANCER_SKU_BASIC:\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc",
"def update_plugin_node_attributes(cls, attributes):\n plugins_attributes = {}\n for k in list(attributes):\n if cls.is_plugin_data(attributes[k]):\n attribute_data = attributes.pop(k)\n attribute_data['metadata'].pop('class')\n node_plugin_id = \\\n attribute_data['metadata'].pop('node_plugin_id')\n plugins_attributes.setdefault(\n node_plugin_id, {}).update({k: attribute_data})\n\n # TODO(ekosareva): think about changed metadata or sections set\n for plugin_id, plugin_attributes in six.iteritems(plugins_attributes):\n NodeClusterPlugin.set_attributes(\n plugin_id,\n plugin_attributes\n )",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def test_update_hyperflex_cluster_network_policy(self):\n pass",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = None\n if mc.addon_profiles is not None:\n monitoring_addon_enabled = (\n CONST_MONITORING_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled\n )\n ingress_appgw_addon_enabled = (\n CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled\n )\n virtual_node_addon_enabled = (\n CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and\n mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled\n )\n # set intermediates, used later to ensure role assignments\n self.context.set_intermediate(\n \"monitoring_addon_enabled\", monitoring_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"ingress_appgw_addon_enabled\", ingress_appgw_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"virtual_node_addon_enabled\", virtual_node_addon_enabled, overwrite_exists=True\n )\n # get azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n )\n\n # update azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = (\n self.update_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n if azure_keyvault_secrets_provider_addon_profile:\n # mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None\n mc.addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = azure_keyvault_secrets_provider_addon_profile\n return mc",
"def update_outbound_type_in_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n outboundType = self.context.get_outbound_type()\n if outboundType:\n vnet_subnet_id = self.context.get_vnet_subnet_id()\n if vnet_subnet_id is None and outboundType not in [\n CONST_OUTBOUND_TYPE_LOAD_BALANCER,\n CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,\n CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING\n ]:\n raise InvalidArgumentValueError(\"Invalid outbound type, supported values are loadBalancer,\"\n \" managedNATGateway, userAssignedNATGateway and userDefinedRouting.\")\n if vnet_subnet_id is not None and outboundType not in [\n CONST_OUTBOUND_TYPE_LOAD_BALANCER,\n CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,\n CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING\n ]:\n raise InvalidArgumentValueError(\"Invalid outbound type, supported values are loadBalancer,\"\n \" managedNATGateway, userAssignedNATGateway and userDefinedRouting.\")\n mc.network_profile.outbound_type = outboundType\n return mc",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_neutron_advanced_configuration(self, option, value):\n attributes = self.nailgun_client.get_cluster_attributes(\n self.cluster_id)\n nac_subdict = attributes['editable']['neutron_advanced_configuration']\n nac_subdict[option]['value'] = value\n self.nailgun_client.update_cluster_attributes(\n self.cluster_id, attributes)",
"def test_patch_cluster_network(self):\n pass",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def test_replace_cluster_network(self):\n pass",
"def update_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.update_network(network)\n except:\n pass",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc"
] |
[
"0.6287261",
"0.62562764",
"0.6206155",
"0.5919854",
"0.5832747",
"0.5745208",
"0.5725939",
"0.5664839",
"0.5643876",
"0.56297773",
"0.5584582",
"0.54472065",
"0.5360767",
"0.53581583",
"0.534671",
"0.53357637",
"0.53113896",
"0.52554923",
"0.5243599",
"0.51855236",
"0.51839894",
"0.516179",
"0.5147673",
"0.51283026",
"0.5102134",
"0.50882435",
"0.5082535",
"0.50351524",
"0.50298804",
"0.50270426"
] |
0.8056863
|
0
|
Set up http proxy config for the ManagedCluster object.
|
def update_http_proxy_config(self, mc: ManagedCluster) -> ManagedCluster:
        self._ensure_mc(mc)

        mc.http_proxy_config = self.context.get_http_proxy_config()
        return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_http_proxy_config(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.http_proxy_config = self.context.get_http_proxy_config()\n return mc",
"def configure_proxy(self, proxy):\n server_name = self.get_external_domain()\n tls_enabled = self.get_tls()\n ircd_enabled = self.charm_config.get(\"enable-ircd\")\n federation_enabled = self.get_federation()\n\n if tls_enabled:\n self.external_port = 443\n else:\n self.external_port = 80\n\n proxy_config = [\n {\n \"mode\": \"http\",\n \"external_port\": self.external_port,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8008,\n \"subdomain\": server_name,\n },\n ]\n\n if federation_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_federation_mode(),\n \"external_port\": 8448,\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": 8448,\n }\n )\n\n if ircd_enabled:\n proxy_config.append(\n {\n \"mode\": self.get_irc_mode(),\n \"external_port\": self.get_irc_port(),\n \"internal_host\": self.get_internal_host(),\n \"internal_port\": self.irc_internal_port,\n }\n )\n\n proxy.configure(proxy_config)",
"def proxy_settings(self):\n if config.proxy_host is None or config.proxy_host == \"\":\n return\n\n proxy = urllib2.ProxyHandler({\"http\": config.proxy_host})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)",
"def set_proxy(self, http_proxy):\n self.http_proxy = http_proxy\n self._geturl.http_proxy = http_proxy",
"def configureProxy():\n # config\n port = config.get(\"proxy\", \"port\")\n allowedDomains = config.get(\"proxy\", \"alloweddomains\")\n listeningIP = config.get(\"hotspot\", \"ip\")\n # wan dns\n proxyNSConfig = \"\"\n for dnsServer in wandns:\n proxyNSConfig = f\"{proxyNSConfig}nserver {dnsServer}\\n\"\n # 3proxy configurations\n proxyConfig = f\"\"\"#!/bin/3proxy\n#daemon\npidfile /var/run/3proxy.pid\nchroot /usr/local/3proxy proxy proxy\nnscache 65536\n{proxyNSConfig}\nlog /logs/3proxy-%y%m%d.log D\nrotate 1\ncounter /count/3proxy.3cf\ninclude /conf/counters\ninclude /conf/bandlimiters\nauth iponly\nallow * * {allowedDomains}\ndeny *\nproxy -e{wanip} -i{listeningIP} -p{port}\n\"\"\"\n confFile = open(\"/etc/3proxy/3proxy.cfg\", \"w\")\n confFile.write(proxyConfig)\n confFile.close()",
"def __init__(self, proxy_enabled: ConfigNodePropertyBoolean=None, proxy_host: ConfigNodePropertyString=None, proxy_port: ConfigNodePropertyInteger=None, proxy_user: ConfigNodePropertyString=None, proxy_password: ConfigNodePropertyString=None, proxy_exceptions: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'proxy_enabled': ConfigNodePropertyBoolean,\n 'proxy_host': ConfigNodePropertyString,\n 'proxy_port': ConfigNodePropertyInteger,\n 'proxy_user': ConfigNodePropertyString,\n 'proxy_password': ConfigNodePropertyString,\n 'proxy_exceptions': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'proxy_enabled': 'proxy.enabled',\n 'proxy_host': 'proxy.host',\n 'proxy_port': 'proxy.port',\n 'proxy_user': 'proxy.user',\n 'proxy_password': 'proxy.password',\n 'proxy_exceptions': 'proxy.exceptions'\n }\n\n self._proxy_enabled = proxy_enabled\n self._proxy_host = proxy_host\n self._proxy_port = proxy_port\n self._proxy_user = proxy_user\n self._proxy_password = proxy_password\n self._proxy_exceptions = proxy_exceptions",
"async def _create_proxy(self):\n self._proxy = await self._controller.fopen_tcp_proxy(\n Cellular._DRONE_WEB_API_PORT\n )\n\n self._drone_http_url = f\"http://{self._proxy.address}:{self._proxy.port}\"\n\n if self._autoconfigure and self._user_apc_token is None:\n self.logger.info(\"cellular auto pairing and configuration\")\n # generate a new anonymous user APC token and configure the cellular.\n self._fautoconfigure_with_new_token()",
"def proxy_host(self, proxy_host: ConfigNodePropertyString):\n\n self._proxy_host = proxy_host",
"def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}",
"def _swift_proxy_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('proxy')\n self._swift_install('proxy')\n self._set_onhold('proxy')\n self._final_install_touches('proxy')",
"def test_add_proxy():\n result = False\n\n proxy = {\n \"name\": \"proxy\",\n \"address\": \"proxy.ntnxlab.local\",\n \"port\": \"8080\",\n \"http\": True,\n \"https\": True,\n \"socks\": False,\n \"username\": '',\n \"password\": '',\n }\n\n cluster_obj = prism.Cluster(api_client=_api())\n config_obj = prism.Config(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n for each_uuid in clusters:\n config_obj.set_proxy(address=proxy['address'], port=proxy['port'], name=proxy['name'], http=proxy['http'], https=proxy['https'],\n username=proxy['username'], password=proxy['password'], socks=proxy['socks'], clusteruuid=each_uuid)\n cluster_proxy = config_obj.get_proxy(clusteruuid=each_uuid)\n\n if proxy['address'] == cluster_proxy[0]['address']:\n result = True\n\n assert result",
"def auto_configure(self):\n try:\n remote_config = getjson(\n url=self.config_url,\n failover_url=self.failover_config_url\n )\n except Exception:\n if not self.connected_to_internet():\n raise Exception(\n 'Cannot initialize without internet access if parameters '\n 'are not supplied. Maybe check that your internet connection '\n 'is working.'\n )\n else:\n raise Exception(\n 'Could not initialize. Possibly due to slow internet. '\n 'Maybe try again in a couple of moments.'\n )\n if remote_config is not None:\n for parameter in self._REMOTELY_CONFIGURABLE:\n try:\n setattr(self, parameter, remote_config[parameter.lower()])\n except AttributeError:\n logging.warning(\n f'Could not set {parameter} as it might be missing '\n 'from remote configuration.'\n )\n else:\n raise Exception('Could not obtain remote configuration')",
"def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)",
"def http_proxy_config(self) -> Optional[pulumi.Input['HttpProxyConfigArgs']]:\n return pulumi.get(self, \"http_proxy_config\")",
"def set_proxies(proxy_dict={}):\n global proxies\n proxies = proxy_dict",
"def get_http_proxy_config(self) -> Union[Dict, ManagedClusterHTTPProxyConfig, None]:\n # read the original value passed by the command\n http_proxy_config = None\n http_proxy_config_file_path = self.raw_param.get(\"http_proxy_config\")\n # validate user input\n if http_proxy_config_file_path:\n if not os.path.isfile(http_proxy_config_file_path):\n raise InvalidArgumentValueError(\n \"{} is not valid file, or not accessable.\".format(\n http_proxy_config_file_path\n )\n )\n http_proxy_config = get_file_json(http_proxy_config_file_path)\n if not isinstance(http_proxy_config, dict):\n raise InvalidArgumentValueError(\n \"Error reading Http Proxy Config from {}. \"\n \"Please see https://aka.ms/HttpProxyConfig for correct format.\".format(\n http_proxy_config_file_path\n )\n )\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"http_proxy_config\") and\n self.mc.http_proxy_config is not None\n ):\n http_proxy_config = self.mc.http_proxy_config\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return http_proxy_config",
"def set_proxy(proxy_url=None, proxy_username=None, proxy_password=None):\n\n instance = Ceic._get_instance()\n\n instance._ceic_configuration.set_proxy(proxy_url, proxy_username, proxy_password)\n\n return instance",
"def test_update_proxy():\n result = False\n\n proxy = {\n 'name': 'proxy',\n 'address': 'proxy2.ntnxlab.local',\n 'port': 8080,\n 'http': True,\n 'https': True,\n 'socks': False,\n 'username': '',\n 'password': '',\n }\n\n cluster_obj = prism.Cluster(api_client=_api())\n config_obj = prism.Config(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n for each_uuid in clusters:\n config_obj.set_proxy(address=proxy['address'], port=proxy['port'], name=proxy['name'], http=proxy['http'], https=proxy['https'],\n username=proxy['username'], password=proxy['password'], socks=proxy['socks'], clusteruuid=each_uuid)\n cluster_proxy = config_obj.get_proxy(clusteruuid=each_uuid)\n\n if proxy['address'] == cluster_proxy[0]['address']:\n result = True\n\n assert result",
"def winhttp_WinHttpSetDefaultProxyConfiguration(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pProxyInfo\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def initProxy(self,parent):\n\n params_file=str(parent)+\"/../statics/params.cfg\"\n print params_file\n param_dict=Loader.factory('NML').load(params_file)\n self.proxyserver=str(param_dict.get('proxy','proxy_adress'))\n self.proxyuser=str(param_dict.get('proxy','proxy_user'))\n self.proxypass=str(param_dict.get('proxy','proxy_pass'))\n self.cmemsuser=str(param_dict.get('cmems_server','user_cmems'))\n self.cmemspass=str(param_dict.get('cmems_server','pass_cmems'))",
"def set_proxy_with_environment_variable():\r\n\r\n logging.debug('set_proxy_with_environment_variable()')\r\n\r\n proxies['http'] = os.getenv('HTTP_PROXY','http://0.0.0.0:80/')\r\n proxies['https'] = os.getenv('HTTPS_PROXY','http://0.0.0.0:80/')",
"def _set_advanced_config_for_botocore_client(dbapi):\n config = None\n\n proxies = dbapi.service_parameter_get_all(\n service=constants.SERVICE_TYPE_DOCKER,\n section=constants.SERVICE_PARAM_SECTION_DOCKER_PROXY)\n\n proxies_dict = {}\n for proxy in proxies:\n if proxy.name == constants.SERVICE_PARAM_NAME_DOCKER_HTTP_PROXY:\n proxies_dict.update({'http': str(proxy.value)})\n\n elif proxy.name == constants.SERVICE_PARAM_NAME_DOCKER_HTTPS_PROXY:\n proxies_dict.update({'https': str(proxy.value)})\n\n if proxies_dict:\n config = Config(proxies=proxies_dict)\n return config",
"def __init__(self):\n self.host = CONF.zvm.zvm_xcat_server\n self.port = 443\n self.conn = HTTPSClientAuthConnection(self.host, self.port,\n CONF.zvm.zvm_xcat_ca_file,\n timeout=CONF.zvm.zvm_xcat_connection_timeout)",
"def _start_proxies_if_needed(self) -> None:\n\n for node_id, node_ip_address in self._get_target_nodes():\n if node_id in self._proxy_states:\n continue\n\n name = format_actor_name(SERVE_PROXY_NAME, self._controller_name, node_id)\n try:\n proxy = ray.get_actor(name, namespace=SERVE_NAMESPACE)\n except ValueError:\n logger.info(\n \"Starting HTTP proxy with name '{}' on node '{}' \"\n \"listening on '{}:{}'\".format(\n name, node_id, self._config.host, self._config.port\n ),\n extra={\"log_to_stderr\": False},\n )\n proxy = HTTPProxyActor.options(\n num_cpus=self._config.num_cpus,\n name=name,\n namespace=SERVE_NAMESPACE,\n lifetime=\"detached\" if self._detached else None,\n max_concurrency=ASYNC_CONCURRENCY,\n max_restarts=-1,\n max_task_retries=-1,\n scheduling_strategy=NodeAffinitySchedulingStrategy(\n node_id, soft=False\n ),\n ).remote(\n self._config.host,\n self._config.port,\n self._config.root_path,\n controller_name=self._controller_name,\n node_ip_address=node_ip_address,\n http_middlewares=self._config.middlewares,\n )\n\n self._proxy_states[node_id] = HTTPProxyState(\n proxy, name, node_id, node_ip_address\n )",
"def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n self.__proxy = (proxytype, addr, port, rdns, username, password)",
"def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):\r\n self.__proxy = (proxytype, addr, port, rdns, username, password)",
"def service_proxy_settings(private_base_url):\n return rawobj.Proxy(private_base_url(\"echo_api\"))",
"def set_proxy(self):",
"def proxy_host(self, proxy_host):\n\n self._proxy_host = proxy_host",
"def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data"
] |
[
"0.8328459",
"0.7013146",
"0.66061956",
"0.62437",
"0.6242223",
"0.6079759",
"0.6033155",
"0.5956672",
"0.59050727",
"0.5850278",
"0.5808023",
"0.57922196",
"0.57898855",
"0.5735324",
"0.5733755",
"0.57264936",
"0.57036024",
"0.56576514",
"0.5641072",
"0.56382865",
"0.55673623",
"0.55576944",
"0.5517341",
"0.5487182",
"0.5482333",
"0.5482333",
"0.54786223",
"0.54664224",
"0.5452285",
"0.5418142"
] |
0.76427263
|
1
|
Update identity for the ManagedCluster object.
|
def update_identity(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
current_identity_type = "spn"
current_user_assigned_identity = ""
if mc.identity is not None:
current_identity_type = mc.identity.type.casefold()
if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:
current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]
goal_identity_type = current_identity_type
assign_identity = self.context.get_assign_identity()
if self.context.get_enable_managed_identity():
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
is_update_identity = ((current_identity_type != goal_identity_type) or
(current_identity_type == goal_identity_type and
current_identity_type == "userassigned" and
assign_identity is not None and
current_user_assigned_identity != assign_identity))
if is_update_identity:
if current_identity_type == "spn":
msg = (
"Your cluster is using service principal, and you are going to update "
"the cluster to use {} managed identity.\nAfter updating, your "
"cluster's control plane and addon pods will switch to use managed "
"identity, but kubelet will KEEP USING SERVICE PRINCIPAL "
"until you upgrade your agentpool.\n"
"Are you sure you want to perform this operation?"
).format(goal_identity_type)
elif current_identity_type != goal_identity_type:
msg = (
"Your cluster is already using {} managed identity, and you are going to "
"update the cluster to use {} managed identity.\n"
"Are you sure you want to perform this operation?"
).format(current_identity_type, goal_identity_type)
else:
msg = (
"Your cluster is already using userassigned managed identity, current control plane identity is {},"
"and you are going to update the cluster identity to {}.\n"
"Are you sure you want to perform this operation?"
).format(current_user_assigned_identity, assign_identity)
# gracefully exit if user does not confirm
if not self.context.get_yes() and not prompt_y_n(msg, default="n"):
raise DecoratorEarlyExitException
# update identity
if goal_identity_type == "systemassigned":
identity = self.models.ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
user_assigned_identity = {
assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
}
identity = self.models.ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc.identity = identity
return mc
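
# Editor's note: a minimal, self-contained sketch of the identity-type decision made
# above. SDK models are replaced with plain strings, so every name below is
# illustrative only and not part of the original record.
def needs_identity_update(current_type, current_uai, enable_msi, assign_identity):
    # Work out the goal identity type, mirroring the logic in update_identity.
    goal_type = current_type
    if enable_msi:
        goal_type = "userassigned" if assign_identity else "systemassigned"
    # An update is needed if the type changes, or if a different user-assigned
    # identity is requested for an already user-assigned cluster.
    return (current_type != goal_type) or (
        goal_type == "userassigned"
        and assign_identity is not None
        and current_uai != assign_identity
    )

# Example: migrating an SPN cluster to a system-assigned managed identity.
assert needs_identity_update("spn", "", True, None) is True
# Example: the same user-assigned identity is requested again, so no update is needed.
assert needs_identity_update("userassigned", "uai-1", True, "uai-1") is False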
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def cluster_identity_modify(self, cluster_location=None, cluster_contact=None, cluster_name=None):\n return self.request( \"cluster-identity-modify\", {\n 'cluster_location': [ cluster_location, 'cluster-location', [ basestring, 'None' ], False ],\n 'cluster_contact': [ cluster_contact, 'cluster-contact', [ basestring, 'None' ], False ],\n 'cluster_name': [ cluster_name, 'cluster-name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def identity(self) -> pulumi.Input['ClusterIdentityArgs']:\n return pulumi.get(self, \"identity\")",
"def cluster_id(self, cluster_id):\n self._cluster_id = cluster_id",
"def setIdentity(self) -> None:\n ...",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def cluster_node_id(self, cluster_node_id):\n\n self._cluster_node_id = cluster_node_id",
"def identity(self) -> pulumi.Output['outputs.ClusterIdentity']:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional[pulumi.Input['ClusterIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def cluster_uuid(self, cluster_uuid):\n\n self._cluster_uuid = cluster_uuid",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def mint_a_new_cid(self):\n self.update({\"cid\": self.table.c.cid +1}, condition=None)",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def cluster_id(self):\n return self._cluster_id",
"def cluster_num(self, cluster_num):\n\n self._cluster_num = cluster_num",
"def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)",
"def setInfo(label: str, value: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n self.__identity_info[label] = value\r\n else:\r\n raise HDDOPermissionException('Tried to set non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to set identity information in a closed HealthDominoDataObject.')",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def _newClusterId(self):\n return self.guidGenerator.new_id()",
"def user_identity(self):\n self.raw['type'] = 'IDENTITY'\n return self"
] |
[
"0.6890525",
"0.6792401",
"0.67554677",
"0.64075714",
"0.6162829",
"0.60769516",
"0.60417324",
"0.6015845",
"0.59331",
"0.5860449",
"0.5834495",
"0.56925756",
"0.55881435",
"0.5503835",
"0.5495691",
"0.5485756",
"0.54500324",
"0.5367659",
"0.53397477",
"0.5334898",
"0.531647",
"0.531582",
"0.52597785",
"0.52597785",
"0.52597785",
"0.52597785",
"0.52597785",
"0.52597785",
"0.5253091",
"0.5245571"
] |
0.7773668
|
0
|
Update azure keyvault secrets provider addon profile.
|
def update_azure_keyvault_secrets_provider_addon_profile(
self,
azure_keyvault_secrets_provider_addon_profile: ManagedClusterAddonProfile,
) -> ManagedClusterAddonProfile:
# determine the value of constants
addon_consts = self.context.get_addon_consts()
CONST_SECRET_ROTATION_ENABLED = addon_consts.get(
"CONST_SECRET_ROTATION_ENABLED"
)
CONST_ROTATION_POLL_INTERVAL = addon_consts.get(
"CONST_ROTATION_POLL_INTERVAL"
)
if self.context.get_enable_secret_rotation():
azure_keyvault_secrets_provider_addon_profile = (
self.ensure_azure_keyvault_secrets_provider_addon_profile(
azure_keyvault_secrets_provider_addon_profile
)
)
azure_keyvault_secrets_provider_addon_profile.config[
CONST_SECRET_ROTATION_ENABLED
] = "true"
if self.context.get_disable_secret_rotation():
azure_keyvault_secrets_provider_addon_profile = (
self.ensure_azure_keyvault_secrets_provider_addon_profile(
azure_keyvault_secrets_provider_addon_profile
)
)
azure_keyvault_secrets_provider_addon_profile.config[
CONST_SECRET_ROTATION_ENABLED
] = "false"
if self.context.get_rotation_poll_interval() is not None:
azure_keyvault_secrets_provider_addon_profile = (
self.ensure_azure_keyvault_secrets_provider_addon_profile(
azure_keyvault_secrets_provider_addon_profile
)
)
azure_keyvault_secrets_provider_addon_profile.config[
CONST_ROTATION_POLL_INTERVAL
] = self.context.get_rotation_poll_interval()
return azure_keyvault_secrets_provider_addon_profile
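
# Editor's note: an illustrative, standalone version of the secret-rotation update
# above, applied to a plain dict standing in for the addon profile config. The key
# names are assumptions for illustration only.
def update_secret_rotation(config, enable=False, disable=False, poll_interval=None):
    config = dict(config)  # work on a copy so the caller's dict is untouched
    if enable:
        config["enableSecretRotation"] = "true"
    if disable:
        config["enableSecretRotation"] = "false"
    if poll_interval is not None:
        config["rotationPollInterval"] = poll_interval
    return config

# Example: enable rotation and set a 5 minute poll interval.
print(update_secret_rotation({}, enable=True, poll_interval="5m"))
# -> {'enableSecretRotation': 'true', 'rotationPollInterval': '5m'}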
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def build_azure_keyvault_secrets_provider_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = (\n self.models.ManagedClusterAddonProfile(\n enabled=True,\n config={\n CONST_SECRET_ROTATION_ENABLED: \"false\",\n CONST_ROTATION_POLL_INTERVAL: \"2m\",\n },\n )\n )\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = None\n if mc.addon_profiles is not None:\n monitoring_addon_enabled = (\n CONST_MONITORING_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled\n )\n ingress_appgw_addon_enabled = (\n CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and\n mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled\n )\n virtual_node_addon_enabled = (\n CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and\n mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled\n )\n # set intermediates, used later to ensure role assignments\n self.context.set_intermediate(\n \"monitoring_addon_enabled\", monitoring_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"ingress_appgw_addon_enabled\", ingress_appgw_addon_enabled, overwrite_exists=True\n )\n self.context.set_intermediate(\n \"virtual_node_addon_enabled\", virtual_node_addon_enabled, overwrite_exists=True\n )\n # get azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n )\n\n # update azure keyvault secrets provider profile\n azure_keyvault_secrets_provider_addon_profile = (\n self.update_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n if azure_keyvault_secrets_provider_addon_profile:\n # mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None\n mc.addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = azure_keyvault_secrets_provider_addon_profile\n return mc",
"def UpdateSecretKey():\n _LOG.info('Updating webapp2_secret_key.')\n webapp2_secret_key = Webapp2SecretKey(id='current_secret_key')\n webapp2_secret_key.secret_key = os.urandom(16).encode('hex')\n webapp2_secret_key.put()\n return True",
"def update(secret: str, value: str, env: Optional[str], config: str) -> None:\n layer = Layer.load_from_yaml(config, env)\n gen_all(layer)\n _raise_if_no_k8s_cluster_exists(layer)\n\n configure_kubectl(layer)\n amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)\n secret_value = base64.b64encode(value.encode(\"utf-8\")).decode(\"utf-8\")\n patch = [{\"op\": \"replace\", \"path\": f\"/data/{secret}\", \"value\": secret_value}]\n load_kube_config()\n v1 = CoreV1Api()\n v1.patch_namespaced_secret(\"secret\", layer.name, patch)\n\n print(\"Success\")",
"def secrets():\n click.echo(STEP_PATH / \"secrets\")",
"def update_my_user_profile(SshPublicKey=None):\n pass",
"def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value",
"def add_ipsec_secrets(self, **kwargs):\r\n\r\n if 'auth_type' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"Mandatory Argument 'auth_type' is missing\")\r\n raise Exception(\"Mandatory Argument 'auth_type' is missing\")\r\n auth_type = kwargs.get('auth_type')\r\n ipsec_secret_file = self.conf_dir + '/ipsec.secrets'\r\n result = self.linux_handle.shell(command='ls ' + ipsec_secret_file).response()\r\n if not re.search(r'No such file or directory', result):\r\n self.linux_handle.log(\"Moving existing %s to %s.orig\" % (ipsec_secret_file, ipsec_secret_file))\r\n cmd = \"mv -f %s %s.orig\" % (ipsec_secret_file, ipsec_secret_file)\r\n self.linux_handle.shell(command=cmd)\r\n line = ''\r\n if auth_type.lower() == 'PSK'.lower():\r\n if 'preshared_key' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n raise Exception(\"Missing argument: For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n if 'host_id' in kwargs:\r\n line = kwargs.get('host_id') + ' '\r\n if 'peer_id' in kwargs:\r\n line = line + ' ' + kwargs.get('peer_id') + ' '\r\n line = line + ' : PSK \"' + kwargs.get('preshared_key') + '\"'\r\n else:\r\n if 'local_cert' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\" 'local_cert' is mandatory argument\")\r\n raise Exception(\"'local_cert' is mandatory argument\")\r\n line = ' : ' + auth_type.upper() + ' ' + kwargs.get('local_cert')\r\n if 'passphrase' in kwargs:\r\n line = line + ' ' + kwargs.get('passphrase')\r\n self.linux_handle.log('Adding %s into secrets file' %line)\r\n\r\n xauth = None\r\n if 'xauth_user' in kwargs and 'xauth_pwd' in kwargs:\r\n xauth = kwargs.get('xauth_user') + ' : XAUTH ' + kwargs.get('xauth_pwd')\r\n\r\n with open('ipsec.secrets', 'w') as out:\r\n out.write(line + \"\\n\")\r\n if xauth is not None:\r\n out.write(xauth + \"\\n\")\r\n out.close()\r\n\r\n if not self.linux_handle.upload(local_file='ipsec.secrets', remote_file=ipsec_secret_file,\r\n protocol='scp'):\r\n self.linux_handle.log(\"Uploading ipsec.secrets file failed\")\r\n raise Exception(\"Uploading ipsec.secrets file failed\")\r\n\r\n self.linux_handle.log(\"Updating ipsec.secrets file successfull\")\r\n return True",
"def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)",
"def configure(self, account_id):\n print(\"Configuring Vault\")\n client = self.connect(VAULT_TOKEN)\n\n # Audit Backend\n if 'syslog/' not in client.sys.list_enabled_audit_devices():\n audit_options = {\n 'log_raw': 'True',\n }\n client.sys.enable_audit_device('syslog', options=audit_options)\n else:\n print(\"audit_backend already created.\")\n\n # Policies\n policies = []\n path = os.path.join(POLICY_DIR, \"*.hcl\")\n for policy in glob.glob(path):\n name = os.path.basename(policy).split('.')[0]\n policies.append(name)\n with open(policy, 'r') as fh:\n client.sys.create_or_update_policy(name, fh.read())\n\n # AWS Authentication Backend\n # Enable AWS auth in Vault\n if 'aws/' not in client.sys.list_auth_methods():\n try:\n client.sys.enable_auth_method('aws')\n except Exception as e:\n raise VaultError(\"Error while enabling auth back end. {}\".format(e))\n else:\n print(\"aws auth backend already created.\")\n\n #Define policies and arn \n arn = 'arn:aws:iam::{}:instance-profile/'.format(account_id)\n\n #For each policy configure the policies on a role of the same name\n for policy in policies:\n client.create_ec2_role(policy,\n bound_iam_instance_profile_arn = arn + policy,\n policies = policy,\n mount_point = 'aws')\n print('Successful write to aws/role/' + policy)\n \n # AWS Secret Backend\n if 'aws/' not in client.sys.list_mounted_secrets_engines():\n try:\n client.sys.enable_secrets_engine('aws')\n except Exception as e:\n raise VaultError('Error while enabling secret back end. {}'.format(e))\n else:\n print(\"aws secret backend already created.\")\n\n path = os.path.join(POLICY_DIR, \"*.iam\")\n for iam in glob.glob(path):\n name = os.path.basename(iam).split('.')[0]\n with open(iam, 'r') as fh:\n # if we json parse the file first we can use the duplicate key trick for comments\n client.secrets.aws.create_or_update_role(name, 'iam_user', policy_document = fh.read())",
"def _configure_ipsec_secrets(self, ipsec_confs):\n secrets_tpl = '../config/tpl/ipsec/ipsec.secrets'\n secret_confs = []\n\n for name, conf in ipsec_confs.items():\n secret_conf = {\n 'right_public_ip': conf['right_public_ip'],\n 'psk': env.get('ipsec_psk_%s' % name),\n }\n secret_confs.append(secret_conf)\n\n # Configure the /etc/ipsec.d/<name>.conf file with passwords\n with hide(*fab_output_hides):\n return upload_template_changed(\n secrets_tpl,\n '/etc/ipsec.secrets',\n context={'confs': secret_confs},\n use_sudo=True,\n mode=0600,\n use_jinja=True\n )",
"def update(self,\n ike_profile_id,\n ip_sec_vpn_ike_profile,\n ):\n return self._invoke('update',\n {\n 'ike_profile_id': ike_profile_id,\n 'ip_sec_vpn_ike_profile': ip_sec_vpn_ike_profile,\n })",
"async def admin_secret(self, ctx: commands.Context, *token: str):\n the_token = await self.config.secret()\n token = ' '.join(token)\n if not token:\n await ctx.author.send(f'Team management secret: {the_token}')\n else:\n await self.config.secret.set(token)\n message = [display(ctx.author),\n f'set the team management secret to {token}.']\n if the_token:\n message.append(f'(was `{the_token}`)')\n await self.admin_msg(' '.join(message))",
"def use_mfa_secret_from_vault(\n self, vault_name: str, vault_key: str, mode: OTPMode = OTPMode.TIME\n ):\n secrets = Vault().get_secret(vault_name)\n if mode == OTPMode.TIME:\n self.set_time_based_otp(secrets[vault_key])\n elif mode == OTPMode.COUNTER:\n self.set_counter_based_otp(secrets[vault_key])",
"def update_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:\n return self.client.patch(self._url(\"%s/secrets\" % id), data=body)",
"def populate_secrets_pre(vault_secret_keys, core_auth_cookies, extra_fns):\n\n for path in vault_secret_keys:\n vault.ensure_secret_key(path)\n\n for fn in extra_fns:\n if fn:\n fn(vault, config, random_secret)\n\n for name in core_auth_cookies:\n vault.ensure_secret(f'liquid/{name}/cookie', lambda: {\n 'cookie': random_secret(64),\n })",
"async def osukey(self, ctx, key):\r\n\r\n # Load config\r\n config_boards = await self.config.apikey()\r\n\r\n # Set new config\r\n await self.config.apikey.set(key)\r\n await ctx.send(\"The apikey has been added.\")",
"def _update_input_config(input_config,secret_key):\n\n for key in input_config.keys():\n if input_config[key].get('arguments') is None:\n input_config[key]['arguments'] = {'secret':secret_key}\n elif input_config[key]['arguments'].get('secret') is None:\n input_config[key]['arguments']['secret'] = secret_key",
"async def test_not_update_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n except Exception as err:\n assert err.__str__() == 'You can not invoke update_provisioning_profile method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def inject_secrets(self, secrets: str) -> None:\n self.config.read(secrets)",
"def ReplaceSecret(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceSecret')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def ReplaceSecret(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceSecret')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def step_update(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console update \"\n \"--enabled {enabled} --expiration {newExpiration} \"\n \"--ssh-public-key {sshPublicKey} --tags {tagsUpdate} \"\n \"--resource-group {resourceGroup} --virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )",
"def with_secrets(self, kind, source):\n\n if kind == \"vault\" and isinstance(source, list):\n source = {\"project\": self.metadata.project, \"secrets\": source}\n\n self.spec.secret_sources.append({\"kind\": kind, \"source\": source})\n return self",
"def add_to_ipsec_secrets(strongswan_obj, **kwargs):\r\n\r\n return strongswan_obj.add_ipsec_secrets(**kwargs)",
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def test_vault_update_vault_section(self):\n pass",
"def upgrade_security_controlpanel_settings(context):\n def _get_enable_self_reg():\n app_perms = portal.rolesOfPermission(permission='Add portal member')\n for appperm in app_perms:\n if appperm['name'] == 'Anonymous' and \\\n appperm['selected'] == 'SELECTED':\n return True\n return False\n\n # get the old site properties\n portal_url = getToolByName(context, 'portal_url')\n portal = portal_url.getPortalObject()\n portal_properties = getToolByName(portal, \"portal_properties\")\n site_properties = portal_properties.site_properties\n\n # get the new registry\n registry = getUtility(IRegistry)\n\n # XXX: Somehow this code is executed for old migration steps as well\n # ( < Plone 4 ) and breaks because there is no registry. Looking up the\n # registry interfaces with 'check=False' will not work, because it will\n # return a settings object and then fail when we try to access the\n # attributes.\n try:\n settings = registry.forInterface(\n ISecuritySchema,\n prefix='plone',\n )\n except KeyError:\n settings = False\n if settings:\n settings.enable_self_reg = _get_enable_self_reg()\n validate_email = portal.getProperty('validate_email', True)\n if validate_email:\n settings.enable_user_pwd_choice = False\n else:\n settings.enable_user_pwd_choice = True\n pmembership = getToolByName(portal, 'portal_membership')\n settings.enable_user_folders = pmembership.getMemberareaCreationFlag()\n settings.allow_anon_views_about = site_properties.getProperty(\n 'allowAnonymousViewAbout', False)\n settings.use_email_as_login = site_properties.getProperty(\n 'use_email_as_login', False)\n settings.use_uuid_as_userid = site_properties.getProperty(\n 'use_uuid_as_userid', False)",
"def update_settings(command):\n namespace = app.main(command)\n assert namespace.command == 'u' or namespace.command == \"updatesettings\""
] |
[
"0.6497985",
"0.5921258",
"0.5528834",
"0.5507355",
"0.5399552",
"0.5295169",
"0.52810067",
"0.5180465",
"0.51292896",
"0.50870067",
"0.5064962",
"0.49963173",
"0.49906343",
"0.49508905",
"0.4913882",
"0.4892998",
"0.48583922",
"0.4851113",
"0.4835317",
"0.48326787",
"0.4824346",
"0.48236543",
"0.48236543",
"0.48009136",
"0.4798929",
"0.47956124",
"0.47807238",
"0.4776747",
"0.47390416",
"0.47219175"
] |
0.78517073
|
0
|
Update addon profiles for the ManagedCluster object.
|
def update_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
# determine the value of constants
addon_consts = self.context.get_addon_consts()
CONST_MONITORING_ADDON_NAME = addon_consts.get(
"CONST_MONITORING_ADDON_NAME"
)
CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(
"CONST_INGRESS_APPGW_ADDON_NAME"
)
CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(
"CONST_VIRTUAL_NODE_ADDON_NAME"
)
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(
"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME"
)
azure_keyvault_secrets_provider_addon_profile = None
if mc.addon_profiles is not None:
monitoring_addon_enabled = (
CONST_MONITORING_ADDON_NAME in mc.addon_profiles and
mc.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
)
ingress_appgw_addon_enabled = (
CONST_INGRESS_APPGW_ADDON_NAME in mc.addon_profiles and
mc.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
)
virtual_node_addon_enabled = (
CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type() in mc.addon_profiles and
mc.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + self.context.get_virtual_node_addon_os_type()].enabled
)
# set intermediates, used later to ensure role assignments
self.context.set_intermediate(
"monitoring_addon_enabled", monitoring_addon_enabled, overwrite_exists=True
)
self.context.set_intermediate(
"ingress_appgw_addon_enabled", ingress_appgw_addon_enabled, overwrite_exists=True
)
self.context.set_intermediate(
"virtual_node_addon_enabled", virtual_node_addon_enabled, overwrite_exists=True
)
# get azure keyvault secrets provider profile
azure_keyvault_secrets_provider_addon_profile = mc.addon_profiles.get(
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
)
# update azure keyvault secrets provider profile
azure_keyvault_secrets_provider_addon_profile = (
self.update_azure_keyvault_secrets_provider_addon_profile(
azure_keyvault_secrets_provider_addon_profile
)
)
if azure_keyvault_secrets_provider_addon_profile:
# mc.addon_profiles should not be None if azure_keyvault_secrets_provider_addon_profile is not None
mc.addon_profiles[
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
] = azure_keyvault_secrets_provider_addon_profile
return mc
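
# Editor's note: an illustrative helper for the "is this addon present and enabled"
# checks above, using a plain dict of {addon_name: profile}; the addon names used in
# the example are placeholders, not taken from the original record.
from types import SimpleNamespace

def addon_enabled(addon_profiles, addon_name):
    # True only when the addon key exists and its profile is marked enabled.
    return addon_name in addon_profiles and bool(addon_profiles[addon_name].enabled)

profiles = {"omsagent": SimpleNamespace(enabled=True)}
print(addon_enabled(profiles, "omsagent"))                   # True
print(addon_enabled(profiles, "ingressApplicationGateway"))  # False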
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\n \"CONST_MONITORING_ADDON_NAME\"\n )\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n )\n CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME = addon_consts.get(\n \"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\"\n )\n CONST_KUBE_DASHBOARD_ADDON_NAME = addon_consts.get(\n \"CONST_KUBE_DASHBOARD_ADDON_NAME\"\n )\n CONST_AZURE_POLICY_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_POLICY_ADDON_NAME\"\n )\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n )\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_OPEN_SERVICE_MESH_ADDON_NAME = addon_consts.get(\n \"CONST_OPEN_SERVICE_MESH_ADDON_NAME\"\n )\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n addon_profiles = {}\n # error out if any unrecognized or duplicate addon provided\n # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is\n # error out if '--enable-addons=virtual-node' is set but aci_subnet_name and vnet_subnet_id are not\n addons = self.context.get_enable_addons()\n if \"http_application_routing\" in addons:\n addon_profiles[\n CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\n ] = self.build_http_application_routing_addon_profile()\n if \"kube-dashboard\" in addons:\n addon_profiles[\n CONST_KUBE_DASHBOARD_ADDON_NAME\n ] = self.build_kube_dashboard_addon_profile()\n if \"monitoring\" in addons:\n addon_profiles[\n CONST_MONITORING_ADDON_NAME\n ] = self.build_monitoring_addon_profile()\n if \"azure-policy\" in addons:\n addon_profiles[\n CONST_AZURE_POLICY_ADDON_NAME\n ] = self.build_azure_policy_addon_profile()\n if \"virtual-node\" in addons:\n # TODO: how about aciConnectorwindows, what is its addon name?\n os_type = self.context.get_virtual_node_addon_os_type()\n addon_profiles[\n CONST_VIRTUAL_NODE_ADDON_NAME + os_type\n ] = self.build_virtual_node_addon_profile()\n if \"ingress-appgw\" in addons:\n addon_profiles[\n CONST_INGRESS_APPGW_ADDON_NAME\n ] = self.build_ingress_appgw_addon_profile()\n if \"confcom\" in addons:\n addon_profiles[\n CONST_CONFCOM_ADDON_NAME\n ] = self.build_confcom_addon_profile()\n if \"open-service-mesh\" in addons:\n addon_profiles[\n CONST_OPEN_SERVICE_MESH_ADDON_NAME\n ] = self.build_open_service_mesh_addon_profile()\n if \"azure-keyvault-secrets-provider\" in addons:\n addon_profiles[\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ] = self.build_azure_keyvault_secrets_provider_addon_profile()\n mc.addon_profiles = addon_profiles\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def update_profile(self, channels=None): # pragma: no cover\n pass",
"def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n api_server_access_profile = None\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n enable_private_cluster = self.context.get_enable_private_cluster()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n private_dns_zone = self.context.get_private_dns_zone()\n if api_server_authorized_ip_ranges or enable_private_cluster:\n api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(\n authorized_ip_ranges=api_server_authorized_ip_ranges,\n enable_private_cluster=True if enable_private_cluster else None,\n enable_private_cluster_public_fqdn=False if disable_public_fqdn else None,\n private_dns_zone=private_dns_zone\n )\n mc.api_server_access_profile = api_server_access_profile\n\n fqdn_subdomain = self.context.get_fqdn_subdomain()\n mc.fqdn_subdomain = fqdn_subdomain\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def build_kube_dashboard_addon_profile(self) -> ManagedClusterAddonProfile:\n kube_dashboard_addon_profile = self.models.ManagedClusterAddonProfile(\n enabled=True,\n )\n return kube_dashboard_addon_profile",
"def test_patch_hyperflex_cluster_profile(self):\n pass",
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def Adjust_Profile_List( self ):\r\n listing = self.system.Profile_List() #Get the list of current profiles\r\n d=DialogProfileAdjust.DialogProfileAdjustment( self.root, listing, 'Profiles', 'Organize the Profiles' )\r\n if( d.return_state == 0 ):\r\n return #Cancel hit\r\n #Go through d's profile list, and try to add names not seen before\r\n for item in d.profile_list:\r\n self.system.Add_Empty_Profile( item )\r\n #Go through d's remove list, and try to remove names if they existed\r\n for name in d.remove_list:\r\n self.system.Remove_Profile( name )",
"def test_update_hyperflex_node_profile(self):\n pass",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update(self, profiles, matches):\n raise NotImplementedError()",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc"
] |
[
"0.6944433",
"0.68986964",
"0.628397",
"0.62623245",
"0.6228979",
"0.6208819",
"0.62029546",
"0.6185522",
"0.59600425",
"0.585744",
"0.5846797",
"0.5775247",
"0.5593754",
"0.5517053",
"0.54938537",
"0.5422821",
"0.5410289",
"0.5373",
"0.5363987",
"0.5341301",
"0.5306761",
"0.527748",
"0.5273452",
"0.5207454",
"0.520383",
"0.5202678",
"0.51970494",
"0.51823294",
"0.51440936",
"0.51335824"
] |
0.74111545
|
0
|
Update storage profile for the ManagedCluster object.
|
def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
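    # overwrite the storage profile with the value resolved from the command context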
mc.storage_profile = self.context.get_storage_profile()
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def set_vm_storage_profile(vm, profile):\n\n spec = vim.vm.ConfigSpec()\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n spec.vmProfile = profile_specs\n vm.ReconfigVM_Task(spec)",
"def get_storage_profile(self) -> Optional[ManagedClusterStorageProfile]:\n profile = self.models.ManagedClusterStorageProfile()\n if self.mc.storage_profile is not None:\n profile = self.mc.storage_profile\n profile.disk_csi_driver = self.get_disk_driver()\n profile.file_csi_driver = self.get_file_driver()\n profile.blob_csi_driver = self.get_blob_driver()\n profile.snapshot_controller = self.get_snapshot_controller()\n\n return profile",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def test_update_hyperflex_cluster_storage_policy(self):\n pass",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def storage_at_clusters(self, storage_at_clusters):\n\n self._storage_at_clusters = storage_at_clusters",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def set_virtual_disk_storage_profile(vm, hardware_device, profile):\n\n spec = vim.vm.ConfigSpec()\n device_specs = []\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n\n device_spec = vim.vm.device.VirtualDeviceSpec()\n device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n device_spec.device = hardware_device\n device_spec.profile = profile_specs\n device_specs.append(device_spec)\n spec.deviceChange = device_specs\n vm.ReconfigVM_Task(spec)",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def add_to_cluster_dictionary(self, cluster):\n\n self.clusters[self.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n\n return",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster"
] |
[
"0.7120557",
"0.6314157",
"0.6202867",
"0.58940035",
"0.5887002",
"0.5872036",
"0.5859722",
"0.5836123",
"0.57843363",
"0.5757808",
"0.5714045",
"0.5625429",
"0.55987877",
"0.54697853",
"0.54638046",
"0.5455607",
"0.54530305",
"0.53886455",
"0.53717756",
"0.5316751",
"0.5289311",
"0.5286942",
"0.52857023",
"0.52525586",
"0.52305865",
"0.5222627",
"0.5154871",
"0.5146749",
"0.5058024",
"0.5058024"
] |
0.81250465
|
0
|
Update defender for the ManagedCluster object.
|
def update_defender(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
defender = self.context.get_defender_config()
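    # only touch the security profile when a Defender config was provided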
if defender:
if mc.security_profile is None:
mc.security_profile = self.models.ManagedClusterSecurityProfile()
mc.security_profile.defender = defender
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def update_cluster_config(self, clusterid, config, **kwargs):\n pass",
"def patch_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def defender(self) -> Optional[pulumi.Input['ManagedClusterSecurityProfileDefenderArgs']]:\n return pulumi.get(self, \"defender\")",
"def _handle_coordinator_update(self) -> None:\n self._update_attrs()\n self.async_write_ha_state()",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def addClusterToDoc(self, lCluster):\n raise Exception(\"To be specialised!\")",
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def _init_cluster(self):\n self._Init_Cluster()",
"def _handle_coordinator_update(self) -> None:\n self._update_attrs()\n return super()._handle_coordinator_update()",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def update_cluster_freq(self, state, cpu_id):\n # For IKS devices cluster changes only possible when\n # freq changes, for other it is determine by cpu_id.\n if self.device.scheduler != 'iks':\n self.current_cluster = self.get_cluster(cpu_id, state)\n if self.get_state_name(state) == \"freqstate\":\n self.current_cluster = self.get_cluster(cpu_id, state)\n self.current_frequency_of_clusters[self.current_cluster] = state",
"def _handle_coordinator_update(self) -> None:\n self.update_from_latest_data()\n self.async_write_ha_state()",
"def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()",
"def _handle_coordinator_update(self) -> None:\n self._update_data()\n self.async_write_ha_state()",
"def setBestCluster(cluster):\r\n global bestCluster\r\n bestCluster = cluster",
"def _handle_coordinator_update(self) -> None:\n self._update_from_rest_data()\n self.async_write_ha_state()",
"def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True",
"def conf_update(self):\n pass",
"def add_to_cluster_dictionary(self, cluster):\n\n self.clusters[self.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n\n return",
"def setup(self, cluster):\n raise NotImplementedError()",
"def version_recluster(self, serial, cluster,\n update_statistics_ancestors_depth=None):\n\n props = self.version_get_properties(serial)\n if not props:\n return\n node = props[NODE]\n size = props[SIZE]\n oldcluster = props[CLUSTER]\n if cluster == oldcluster:\n return\n\n mtime = time()\n self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster,\n update_statistics_ancestors_depth)\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n q = \"update versions set cluster = ? where serial = ?\"\n self.execute(q, (cluster, serial))"
] |
[
"0.663469",
"0.5969144",
"0.5808746",
"0.57962227",
"0.56318694",
"0.56318694",
"0.56318694",
"0.56318694",
"0.56318694",
"0.56318694",
"0.5557113",
"0.5524853",
"0.55183303",
"0.5509389",
"0.54063356",
"0.53238606",
"0.5301363",
"0.52094877",
"0.51349497",
"0.51327986",
"0.5109798",
"0.5108088",
"0.5100452",
"0.5089102",
"0.50690407",
"0.5050187",
"0.5010163",
"0.49969327",
"0.4968284",
"0.49672556"
] |
0.75369984
|
0
|
Update workload identity profile for the ManagedCluster object.
|
def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
profile = self.context.get_workload_identity_profile()
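    # create the security profile on demand before attaching the workload identity settings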
if profile:
if mc.security_profile is None:
mc.security_profile = self.models.ManagedClusterSecurityProfile()
mc.security_profile.workload_identity = profile
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def cluster_identity_modify(self, cluster_location=None, cluster_contact=None, cluster_name=None):\n return self.request( \"cluster-identity-modify\", {\n 'cluster_location': [ cluster_location, 'cluster-location', [ basestring, 'None' ], False ],\n 'cluster_contact': [ cluster_contact, 'cluster-contact', [ basestring, 'None' ], False ],\n 'cluster_name': [ cluster_name, 'cluster-name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def set_up_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def test_patch_hyperflex_cluster_profile(self):\n pass",
"def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(enabled=True)\n else:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc"
] |
[
"0.7672089",
"0.7095717",
"0.69768775",
"0.67233837",
"0.65568775",
"0.6508077",
"0.6300794",
"0.62733924",
"0.6218579",
"0.61879885",
"0.60814923",
"0.591891",
"0.5907758",
"0.5855132",
"0.58442307",
"0.5833491",
"0.5806006",
"0.56718785",
"0.56357676",
"0.55347407",
"0.5531072",
"0.54816675",
"0.54179484",
"0.5400498",
"0.5385012",
"0.53416747",
"0.53306603",
"0.532914",
"0.53207487",
"0.5308297"
] |
0.80174404
|
0
|
Update supportPlan for the ManagedCluster object.
|
def update_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
support_plan = self.context.get_k8s_support_plan()
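    # long term support (LTS) is only allowed on clusters with the Premium SKU tier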
if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:
if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():
raise AzCLIError("Long term support is only available for premium tier clusters.")
mc.support_plan = support_plan
return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def patch_cluster(self, cluster: Union[dto.Cluster, str]) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_kubernetes_cluster(\n self,\n cluster: Union[dto.KubernetesCluster, str],\n template: Union[dto.KubernetesClusterTemplate, str]\n ) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def patch_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError",
"def get_k8s_support_plan(self) -> Union[str, None]:\n # default to None\n support_plan = None\n # try to read the property value corresponding to the parameter from the `mc` object\n if self.mc and hasattr(self.mc, \"support_plan\") and self.mc.support_plan is not None:\n support_plan = self.mc.support_plan\n\n # if specified by customer, use the specified value\n support_plan = self.raw_param.get(\"k8s_support_plan\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return support_plan",
"def update(cls, plan_id, **kwargs):\n return cls().requests.put(f\"plan/{plan_id}\", data=kwargs,)",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def set_support(self, support):\n self.support = round(support, 3)",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def test_update_hyperflex_cluster(self):\n pass",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_storage_plan(user, storage_plan):\n plans = StoragePlan.query\n storage_plan = plans.filter(StoragePlan.id == storage_plan).first()\n user.storage_plan_id = storage_plan.id\n user.storage_plan = storage_plan\n db.session.commit()",
"def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_placement(session, cluster, vm_ref, group_info):\n cluster_config = session._call_method(\n vutil, \"get_object_property\", cluster, \"configurationEx\")\n\n if cluster_config:\n group = _get_vm_group(cluster_config, group_info)\n client_factory = session.vim.client.factory\n config_spec = client_factory.create('ns0:ClusterConfigSpecEx')\n\n if not group:\n \"\"\"Creating group\"\"\"\n config_spec.groupSpec = _create_vm_group_spec(\n client_factory, group_info, [vm_ref], operation=\"add\",\n group=group)\n\n if group:\n # VM group exists on the cluster which is assumed to be\n # created by VC admin. Add instance to this vm group and let\n # the placement policy defined by the VC admin take over\n config_spec.groupSpec = _create_vm_group_spec(\n client_factory, group_info, [vm_ref], operation=\"edit\",\n group=group)\n\n # If server group policies are defined (by tenants), then\n # create/edit affinity/anti-affinity rules on cluster.\n # Note that this might be add-on to the existing vm group\n # (mentioned above) policy defined by VC admin i.e if VC admin has\n # restricted placement of VMs to a specific group of hosts, then\n # the server group policy from nova might further restrict to\n # individual hosts on a cluster\n if group_info.policies:\n # VM group does not exist on cluster\n policy = group_info.policies[0]\n if policy != 'soft-affinity':\n rule_name = \"%s-%s\" % (group_info.uuid, policy)\n rule = _get_rule(cluster_config, rule_name)\n operation = \"edit\" if rule else \"add\"\n config_spec.rulesSpec = _create_cluster_rules_spec(\n client_factory, rule_name, [vm_ref], policy=policy,\n operation=operation, rule=rule)\n\n reconfigure_cluster(session, cluster, config_spec)",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def cluster(self, cluster):\n\n self._cluster = cluster",
"def update_cluster(project_id, location, realm_id, cluster_id):\n\n client = gaming.GameServerClustersServiceClient()\n\n request = game_server_clusters.UpdateGameServerClusterRequest(\n game_server_cluster=game_server_clusters.GameServerCluster(\n name=f\"projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}\",\n labels={\"label-key-1\": \"label-value-1\", \"label-key-2\": \"label-value-2\"},\n ),\n update_mask=field_mask.FieldMask(paths=[\"labels\"]),\n )\n\n operation = client.update_game_server_cluster(request)\n print(f\"Update cluster operation: {operation.operation.name}\")\n operation.result(timeout=120)",
"def version_recluster(self, serial, cluster,\n update_statistics_ancestors_depth=None):\n\n props = self.version_get_properties(serial)\n if not props:\n return\n node = props[NODE]\n size = props[SIZE]\n oldcluster = props[CLUSTER]\n if cluster == oldcluster:\n return\n\n mtime = time()\n self.statistics_update_ancestors(node, -1, -size, mtime, oldcluster,\n update_statistics_ancestors_depth)\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n q = \"update versions set cluster = ? where serial = ?\"\n self.execute(q, (cluster, serial))",
"def update_solution(self, new_plan, use_cat=True, soc=True):\n if not new_plan.path:\n self.sol = TimeUncertaintySolution.empty_solution(self.sol.nodes_generated)\n self.sol.paths[new_plan.agent] = new_plan\n return\n if not use_cat:\n self.sol.paths[new_plan.agent] = new_plan\n self.sol.create_movement_tuples(agents=[new_plan.agent])\n return\n\n self.sol.paths[new_plan.agent] = new_plan\n new_moves = self.sol.add_stationary_moves()\n self.sol.create_movement_tuples(agents=[new_plan.agent])\n self.update_conflict_avoidance_table(new_plan.agent, new_moves)\n self.sol.compute_solution_cost(sum_of_costs=soc) # compute the cost",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def put(self):\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n\n user = get_authenticated_user()\n if not user.stripe_id:\n raise InvalidRequest()\n\n price = get_price(plan, False)\n if not price:\n abort(404, message=\"Plan not found\")\n\n return change_subscription(user, price)"
] |
[
"0.61214167",
"0.5992183",
"0.5901938",
"0.5507676",
"0.5250396",
"0.5219869",
"0.5205649",
"0.51211023",
"0.5120378",
"0.49291208",
"0.49023843",
"0.48664457",
"0.48022982",
"0.47577623",
"0.475658",
"0.47519207",
"0.47405082",
"0.46961132",
"0.4693477",
"0.46505013",
"0.46505013",
"0.46505013",
"0.46505013",
"0.46505013",
"0.46505013",
"0.46355048",
"0.4611149",
"0.46043423",
"0.45882553",
"0.45705494"
] |
0.7182371
|
0
|
Update security profile azureKeyvaultKms for the ManagedCluster object.
|
def update_azure_keyvault_kms(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if self.context.get_enable_azure_keyvault_kms():
# get kms profile
if mc.security_profile is None:
mc.security_profile = self.models.ManagedClusterSecurityProfile()
azure_key_vault_kms_profile = mc.security_profile.azure_key_vault_kms
if azure_key_vault_kms_profile is None:
azure_key_vault_kms_profile = self.models.AzureKeyVaultKms()
mc.security_profile.azure_key_vault_kms = azure_key_vault_kms_profile
# set enabled
azure_key_vault_kms_profile.enabled = True
# set key id
azure_key_vault_kms_profile.key_id = self.context.get_azure_keyvault_kms_key_id()
        # set network access; it should never be None at this point, so it can be safely assigned (temp fix for rp)
        # the value comes from user input, is backfilled from the existing mc, or falls back to the default
azure_key_vault_kms_profile.key_vault_network_access = (
self.context.get_azure_keyvault_kms_key_vault_network_access()
)
# set key vault resource id
if azure_key_vault_kms_profile.key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:
azure_key_vault_kms_profile.key_vault_resource_id = (
self.context.get_azure_keyvault_kms_key_vault_resource_id()
)
else:
azure_key_vault_kms_profile.key_vault_resource_id = ""
if self.context.get_disable_azure_keyvault_kms():
# get kms profile
if mc.security_profile is None:
mc.security_profile = self.models.ManagedClusterSecurityProfile()
azure_key_vault_kms_profile = mc.security_profile.azure_key_vault_kms
if azure_key_vault_kms_profile is None:
azure_key_vault_kms_profile = self.models.AzureKeyVaultKms()
mc.security_profile.azure_key_vault_kms = azure_key_vault_kms_profile
# set enabled to False
azure_key_vault_kms_profile.enabled = False
return mc
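
A minimal, self-contained sketch of the enable/disable pattern used above, assuming stand-in dataclasses in place of the real ManagedClusterSecurityProfile and AzureKeyVaultKms SDK models (the names below are illustrative, not the azure-mgmt-containerservice classes):

# Hypothetical stand-ins for the SDK models, defined only for illustration.
from dataclasses import dataclass
from typing import Optional

CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE = "Private"

@dataclass
class AzureKeyVaultKms:
    enabled: bool = False
    key_id: Optional[str] = None
    key_vault_network_access: Optional[str] = None
    key_vault_resource_id: Optional[str] = None

@dataclass
class ManagedClusterSecurityProfile:
    azure_key_vault_kms: Optional[AzureKeyVaultKms] = None

def toggle_kms(profile: Optional[ManagedClusterSecurityProfile],
               enable: bool,
               key_id: str = "",
               network_access: str = "Public",
               key_vault_resource_id: str = "") -> ManagedClusterSecurityProfile:
    # Lazily create the security profile and KMS sub-profile, mirroring the
    # None checks in update_azure_keyvault_kms above.
    profile = profile or ManagedClusterSecurityProfile()
    kms = profile.azure_key_vault_kms or AzureKeyVaultKms()
    profile.azure_key_vault_kms = kms

    kms.enabled = enable
    if enable:
        kms.key_id = key_id
        kms.key_vault_network_access = network_access
        # The key vault resource id is only meaningful for private network access.
        if network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:
            kms.key_vault_resource_id = key_vault_resource_id
        else:
            kms.key_vault_resource_id = ""
    return profile

# Enable with a private key vault, then disable; disabling only flips the flag
# and leaves the previously recorded key settings in place.
p = toggle_kms(None, True,
               key_id="https://kv.vault.azure.net/keys/k/1",
               network_access=CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE,
               key_vault_resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/kv")
p = toggle_kms(p, False)
assert p.azure_key_vault_kms.enabled is False
assert p.azure_key_vault_kms.key_id is not None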
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_azure_keyvault_kms(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_azure_keyvault_kms():\n key_id = self.context.get_azure_keyvault_kms_key_id()\n if key_id:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms(\n enabled=True,\n key_id=key_id,\n )\n key_vault_network_access = self.context.get_azure_keyvault_kms_key_vault_network_access()\n mc.security_profile.azure_key_vault_kms.key_vault_network_access = key_vault_network_access\n if key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:\n mc.security_profile.azure_key_vault_kms.key_vault_resource_id = (\n self.context.get_azure_keyvault_kms_key_vault_resource_id()\n )\n\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n # set intermediate\n self.context.set_intermediate(\"azuremonitormetrics_addon_enabled\", True, overwrite_exists=True)\n return mc",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()\n if not no_ssh_key:\n ssh_config = self.models.ContainerServiceSshConfiguration(\n public_keys=[\n self.models.ContainerServiceSshPublicKey(\n key_data=ssh_key_value\n )\n ]\n )\n linux_profile = self.models.ContainerServiceLinuxProfile(\n admin_username=self.context.get_admin_username(), ssh=ssh_config\n )\n mc.linux_profile = linux_profile\n return mc",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc",
"def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def configure_cluster_for_kerberos(self, cluster_name):\n return self._post(endpoint=('{}/clusters/{}/commands/'\n 'configureForKerberos').format(self.api_version,\n cluster_name),\n data={}).json()",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def aks_cluster_profile(self) -> 'outputs.ClusterPoolResourcePropertiesResponseAksClusterProfile':\n return pulumi.get(self, \"aks_cluster_profile\")"
] |
[
"0.65597516",
"0.6115231",
"0.604408",
"0.60109097",
"0.59037745",
"0.5771745",
"0.5722448",
"0.5669089",
"0.56399703",
"0.5556204",
"0.54925394",
"0.54224646",
"0.538033",
"0.53518724",
"0.5334519",
"0.5256056",
"0.523743",
"0.5231732",
"0.5216337",
"0.5184685",
"0.5183231",
"0.5138974",
"0.5126557",
"0.5103482",
"0.50627816",
"0.50238234",
"0.50170356",
"0.5008548",
"0.5006656",
"0.49884102"
] |
0.7189915
|
0
|
Update security profile imageCleaner for the ManagedCluster object.
|
def update_image_cleaner(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
enable_image_cleaner = self.context.get_enable_image_cleaner()
disable_image_cleaner = self.context.get_disable_image_cleaner()
interval_hours = self.context.get_image_cleaner_interval_hours()
# no image cleaner related changes
if not enable_image_cleaner and not disable_image_cleaner and interval_hours is None:
return mc
if mc.security_profile is None:
mc.security_profile = self.models.ManagedClusterSecurityProfile()
image_cleaner_profile = mc.security_profile.image_cleaner
if image_cleaner_profile is None:
image_cleaner_profile = self.models.ManagedClusterSecurityProfileImageCleaner()
mc.security_profile.image_cleaner = image_cleaner_profile
# init the image cleaner profile
image_cleaner_profile.enabled = False
image_cleaner_profile.interval_hours = 7 * 24
if enable_image_cleaner:
image_cleaner_profile.enabled = True
if disable_image_cleaner:
image_cleaner_profile.enabled = False
if interval_hours is not None:
image_cleaner_profile.interval_hours = interval_hours
return mc
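
To make the defaulting and precedence explicit, here is a minimal sketch under the assumption of a hypothetical ImageCleanerProfile dataclass standing in for ManagedClusterSecurityProfileImageCleaner; it illustrates the logic above rather than the SDK model:

# ImageCleanerProfile is a hypothetical stand-in for the SDK model, for illustration only.
from dataclasses import dataclass
from typing import Optional

@dataclass
class ImageCleanerProfile:
    enabled: bool = False
    interval_hours: int = 7 * 24  # default interval: one week

def resolve_image_cleaner(existing: Optional[ImageCleanerProfile],
                          enable: bool,
                          disable: bool,
                          interval_hours: Optional[int]) -> Optional[ImageCleanerProfile]:
    # No image-cleaner related flags at all: leave the profile untouched.
    if not enable and not disable and interval_hours is None:
        return existing
    # A fresh profile starts disabled with the one-week default; an existing
    # profile keeps its current values, matching update_image_cleaner above.
    profile = existing or ImageCleanerProfile()
    if enable:
        profile.enabled = True
    if disable:
        profile.enabled = False
    if interval_hours is not None:
        profile.interval_hours = interval_hours
    return profile

# Enabling without an interval keeps the one-week default; a later call can
# change only the interval without flipping the enabled flag.
p = resolve_image_cleaner(None, enable=True, disable=False, interval_hours=None)
assert p.enabled and p.interval_hours == 7 * 24
p = resolve_image_cleaner(p, enable=False, disable=False, interval_hours=48)
assert p.enabled and p.interval_hours == 48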
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_image_cleaner(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n interval_hours = self.context.get_image_cleaner_interval_hours()\n\n if self.context.get_enable_image_cleaner():\n\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n if not interval_hours:\n # default value for intervalHours - one week\n interval_hours = 24 * 7\n\n mc.security_profile.image_cleaner = self.models.ManagedClusterSecurityProfileImageCleaner(\n enabled=True,\n interval_hours=interval_hours,\n )\n\n return mc",
"def image_cleaner(self) -> Optional[pulumi.Input['ManagedClusterSecurityProfileImageCleanerArgs']]:\n return pulumi.get(self, \"image_cleaner\")",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_disable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(\n enabled=False\n )\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set enabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n\n if self.context.get_disable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()\n\n # set disabled\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False\n\n return mc",
"def update_mean(img, clustermask):\n flat = img.flatten()\n flat.reshape((int(flat.shape[0] / 3), 3))\n w, h, _ = clustermask.shape\n cluster_assignees={}\n for cid,_ in enumerate(current_cluster_centers):\n cluster_assignees[cid] = []\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y][0]\n cluster_assignees[cid].append(img[x,y])\n for cid, pixels in cluster_assignees.items():\n current_cluster_centers[cid] = np.mean(np.array(pixels),axis=0)\n return clustermask",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def cleanup_cluster(self, cluster):\n self.log.info(\"removing xdcr/nodes settings\")\n rest = RestConnection(cluster.get_master_node())\n rest.remove_all_replications()\n rest.remove_all_remote_clusters()\n rest.remove_all_recoveries()\n cluster.cleanup_cluster(\"upgradeXDCR\")",
"def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()\n if not no_ssh_key:\n ssh_config = self.models.ContainerServiceSshConfiguration(\n public_keys=[\n self.models.ContainerServiceSshPublicKey(\n key_data=ssh_key_value\n )\n ]\n )\n linux_profile = self.models.ContainerServiceLinuxProfile(\n admin_username=self.context.get_admin_username(), ssh=ssh_config\n )\n mc.linux_profile = linux_profile\n return mc",
"def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def _update_helper(self, df_series_clean, profile):\n if self._NumericStatsMixin__calculations:\n NumericStatsMixin._update_helper(self, df_series_clean, profile)\n self._update_column_base_properties(profile)",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def upscale_cluster_info(VMname, master=False):\n with open('TemporaryInfo.json', mode='r') as jsonfile:\n TemporaryInfo = json.load(jsonfile)\n privateIP = TemporaryInfo.get(\"privateIpAddress\")\n publicIP = TemporaryInfo.get(\"publicIpAddress\")\n jsonfile.close()\n\n with open('ClusterInfo.json', mode='r') as jsonfile:\n if len(jsonfile.readline()) == 0:\n sys.exit('Error: ClusterInfo.json file appears to be empty.')\n else:\n jsonfile.seek(0,0) # Return the pointer to the beginning of the file\n ClusterInfo = json.load(jsonfile)\n nrSlaves = ClusterInfo[0].get(\"NumberSlaves\")\n jsonfile.close()\n\n with open('ClusterInfoUpdated.json', mode='w') as jsonfile:\n if master:\n if ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a master while according to ClusterInfo there already is one.')\n else:\n newmaster = {}\n newmaster['privateIP'] = privateIP\n newmaster['publicIP'] = publicIP\n newmaster['role'] = 'Master_and_Slave'\n newmaster['VMname'] = VMname\n nrSlaves += 1 # Adding a new slave to the count\n ClusterInfo[0][\"ExistMaster\"] = True\n ClusterInfo.append(newmaster)\n\n if not ClusterInfo[0][\"ExistMaster\"]:\n sys.exit('Error: Trying to add a slave while according to ClusterInfo there is no master.')\n if not master:\n nrSlaves += 1 # Adding a new slave to the count\n newslave = {}\n newslave['privateIP'] = privateIP\n newslave['publicIP'] = publicIP\n newslave['VMname'] = VMname\n newslave['SlaveID'] = str(nrSlaves)\n newslave['role'] = 'Slave'\n ClusterInfo.append(newslave)\n\n ClusterInfo[0][\"NumberSlaves\"] = nrSlaves\n json.dump(ClusterInfo, jsonfile)\n jsonfile.close()\n\n return",
"def manage_image_cache(self, context, all_instances):\n pass",
"def update_classify(self, img, rho=0.01, threshold=2.5):\n self.update(img, rho)\n return self.classify(img, threshold)",
"def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)",
"def save(self):\n try:\n old_avatar_service = (\n self.avatar_service_registry\n .get_avatar_service(\n self.settings_manager.avatar_service_id)\n )\n except ItemLookupError:\n old_avatar_service = None\n\n if old_avatar_service and old_avatar_service.is_configurable():\n old_avatar_service.cleanup(self.user)\n self.settings_manager.configuration.pop(\n old_avatar_service.avatar_service_id)\n\n avatar_service_id = self.cleaned_data['avatar_service_id']\n new_avatar_service = (\n self.avatar_service_registry\n .get_avatar_service(avatar_service_id)\n )\n self.settings_manager.avatar_service_id = avatar_service_id\n\n if new_avatar_service.is_configurable():\n avatar_service_form = self.avatar_service_forms[avatar_service_id]\n self.settings_manager.configuration[avatar_service_id] = \\\n avatar_service_form.save()\n\n self.settings_manager.save()",
"def update_preprocessing_gmwmi(self, new):\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = new\n self.stages[\"Registration\"].config.gmwmi_seeding = new"
] |
[
"0.6851309",
"0.67022616",
"0.5652698",
"0.555268",
"0.54567385",
"0.5302302",
"0.52607596",
"0.50487584",
"0.50180537",
"0.5011092",
"0.5008191",
"0.5006725",
"0.49423403",
"0.48312888",
"0.47744167",
"0.47587383",
"0.47480166",
"0.47262153",
"0.47243848",
"0.47189918",
"0.47152266",
"0.46856663",
"0.46436518",
"0.46365514",
"0.45920148",
"0.4584669",
"0.45822766",
"0.45788258",
"0.45580357",
"0.45502403"
] |
0.73032594
|
0
|
Update identity profile for the ManagedCluster object.
|
def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
assign_kubelet_identity = self.context.get_assign_kubelet_identity()
if assign_kubelet_identity:
identity_profile = {
'kubeletidentity': self.models.UserAssignedIdentity(
resource_id=assign_kubelet_identity,
)
}
user_assigned_identity = self.context.get_assign_identity()
if not user_assigned_identity:
user_assigned_identity = self.context.get_user_assignd_identity_from_mc()
cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(
self.cmd,
cluster_identity_object_id,
assign_kubelet_identity)
mc.identity_profile = identity_profile
return mc
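
For reference, a minimal sketch of the identity_profile shape assembled above; UserAssignedIdentity here is a hypothetical stand-in that only carries resource_id, matching the fact that update_identity_profile leaves client_id and object_id for the resource provider to resolve:

# UserAssignedIdentity is a hypothetical stand-in for the SDK model; only
# resource_id is populated, as in update_identity_profile above.
from dataclasses import dataclass

@dataclass
class UserAssignedIdentity:
    resource_id: str

def build_kubelet_identity_profile(kubelet_identity_resource_id: str) -> dict:
    # The profile is keyed by the well-known "kubeletidentity" entry.
    return {
        "kubeletidentity": UserAssignedIdentity(
            resource_id=kubelet_identity_resource_id,
        )
    }

profile = build_kubelet_identity_profile(
    "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
    "Microsoft.ManagedIdentity/userAssignedIdentities/<kubelet-identity>"
)
assert "kubeletidentity" in profile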
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def cluster_identity_modify(self, cluster_location=None, cluster_contact=None, cluster_name=None):\n return self.request( \"cluster-identity-modify\", {\n 'cluster_location': [ cluster_location, 'cluster-location', [ basestring, 'None' ], False ],\n 'cluster_contact': [ cluster_contact, 'cluster-contact', [ basestring, 'None' ], False ],\n 'cluster_name': [ cluster_name, 'cluster-name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )",
"def set_up_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n oidc_issuer_profile = self.context.get_oidc_issuer_profile()\n if oidc_issuer_profile is not None:\n mc.oidc_issuer_profile = oidc_issuer_profile\n\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # If customer explicitly provide a service principal, disable managed identity.\n (\n service_principal,\n client_secret,\n ) = self.context.get_service_principal_and_client_secret()\n enable_managed_identity = self.context.get_enable_managed_identity()\n # Skip create service principal profile for the cluster if the cluster enables managed identity\n # and customer doesn't explicitly provide a service principal.\n if not (\n enable_managed_identity and\n not service_principal and\n not client_secret\n ):\n service_principal_profile = (\n self.models.ManagedClusterServicePrincipalProfile(\n client_id=service_principal, secret=client_secret\n )\n )\n mc.service_principal_profile = service_principal_profile\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)",
"def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def customer_profile_oid(self, customer_profile_oid):\n\n self._customer_profile_oid = customer_profile_oid",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc"
] |
[
"0.7364104",
"0.7085298",
"0.66386825",
"0.6575664",
"0.6477139",
"0.6461136",
"0.6394247",
"0.63911194",
"0.63665646",
"0.63287127",
"0.6208386",
"0.59377927",
"0.5912477",
"0.58874655",
"0.58811486",
"0.5802723",
"0.57800543",
"0.5643181",
"0.5636011",
"0.55286914",
"0.5502603",
"0.5495467",
"0.5487281",
"0.54636115",
"0.5422025",
"0.5419615",
"0.5391886",
"0.53876305",
"0.53741235",
"0.5315749"
] |
0.76883554
|
0
|
Update workload autoscaler profile for the ManagedCluster object.
|
def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
if self.context.get_enable_keda():
if mc.workload_auto_scaler_profile is None:
mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()
mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)
if self.context.get_disable_keda():
if mc.workload_auto_scaler_profile is None:
mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()
mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(
enabled=False
)
if self.context.get_enable_vpa():
if mc.workload_auto_scaler_profile is None:
mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()
if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:
mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()
# set enabled
mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True
if self.context.get_disable_vpa():
if mc.workload_auto_scaler_profile is None:
mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()
if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:
mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler()
# set disabled
mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = False
return mc
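
# A minimal, self-contained sketch of the lazy-initialization pattern used by
# update_workload_auto_scaler_profile above: nested profile objects are only created
# when a KEDA/VPA flag actually requests a change, so an untouched cluster keeps
# workload_auto_scaler_profile == None. The stand-in classes and the apply_toggles
# helper below are illustrative assumptions, not azure-cli code.

from dataclasses import dataclass
from typing import Optional


@dataclass
class Keda:
    enabled: bool = False


@dataclass
class Vpa:
    enabled: bool = False


@dataclass
class WorkloadAutoScalerProfile:
    keda: Optional[Keda] = None
    vertical_pod_autoscaler: Optional[Vpa] = None


@dataclass
class Cluster:
    workload_auto_scaler_profile: Optional[WorkloadAutoScalerProfile] = None


def apply_toggles(cluster: Cluster, enable_keda=False, disable_keda=False,
                  enable_vpa=False, disable_vpa=False) -> Cluster:
    # KEDA: create the profile container lazily, then set the requested state.
    if enable_keda or disable_keda:
        if cluster.workload_auto_scaler_profile is None:
            cluster.workload_auto_scaler_profile = WorkloadAutoScalerProfile()
        cluster.workload_auto_scaler_profile.keda = Keda(enabled=enable_keda and not disable_keda)
    # VPA: same lazy pattern, one level deeper.
    if enable_vpa or disable_vpa:
        if cluster.workload_auto_scaler_profile is None:
            cluster.workload_auto_scaler_profile = WorkloadAutoScalerProfile()
        if cluster.workload_auto_scaler_profile.vertical_pod_autoscaler is None:
            cluster.workload_auto_scaler_profile.vertical_pod_autoscaler = Vpa()
        cluster.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = (
            enable_vpa and not disable_vpa
        )
    return cluster


if __name__ == "__main__":
    untouched = apply_toggles(Cluster())
    assert untouched.workload_auto_scaler_profile is None  # nothing requested, nothing created

    updated = apply_toggles(Cluster(), enable_keda=True, enable_vpa=True)
    assert updated.workload_auto_scaler_profile.keda.enabled is True
    assert updated.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled is True
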
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_keda():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)\n\n if self.context.get_enable_vpa():\n if mc.workload_auto_scaler_profile is None:\n mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile()\n if mc.workload_auto_scaler_profile.vertical_pod_autoscaler is None:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler = self.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(enabled=True)\n else:\n mc.workload_auto_scaler_profile.vertical_pod_autoscaler.enabled = True\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def get_cluster_autoscaler_profile(self) -> Union[Dict[str, str], None]:\n return self._get_cluster_autoscaler_profile()",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def _get_cluster_autoscaler_profile(self, read_only: bool = False) -> Union[Dict[str, str], None]:\n # read the original value passed by the command\n cluster_autoscaler_profile = self.raw_param.get(\"cluster_autoscaler_profile\")\n # parse and validate user input\n cluster_autoscaler_profile = self.__validate_cluster_autoscaler_profile(cluster_autoscaler_profile)\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.auto_scaler_profile is not None:\n cluster_autoscaler_profile = self.mc.auto_scaler_profile\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return cluster_autoscaler_profile\n\n # dynamic completion for update mode only\n if not read_only and self.decorator_mode == DecoratorMode.UPDATE:\n if cluster_autoscaler_profile and self.mc and self.mc.auto_scaler_profile:\n # shallow copy should be enough for string-to-string dictionary\n copy_of_raw_dict = self.mc.auto_scaler_profile.__dict__.copy()\n new_options_dict = dict(\n (key.replace(\"-\", \"_\"), value)\n for (key, value) in cluster_autoscaler_profile.items()\n )\n copy_of_raw_dict.update(new_options_dict)\n cluster_autoscaler_profile = copy_of_raw_dict\n\n # this parameter does not need validation\n return cluster_autoscaler_profile",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_image_cleaner(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_image_cleaner = self.context.get_enable_image_cleaner()\n disable_image_cleaner = self.context.get_disable_image_cleaner()\n interval_hours = self.context.get_image_cleaner_interval_hours()\n\n # no image cleaner related changes\n if not enable_image_cleaner and not disable_image_cleaner and interval_hours is None:\n return mc\n\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n image_cleaner_profile = mc.security_profile.image_cleaner\n\n if image_cleaner_profile is None:\n image_cleaner_profile = self.models.ManagedClusterSecurityProfileImageCleaner()\n mc.security_profile.image_cleaner = image_cleaner_profile\n\n # init the image cleaner profile\n image_cleaner_profile.enabled = False\n image_cleaner_profile.interval_hours = 7 * 24\n\n if enable_image_cleaner:\n image_cleaner_profile.enabled = True\n\n if disable_image_cleaner:\n image_cleaner_profile.enabled = False\n\n if interval_hours is not None:\n image_cleaner_profile.interval_hours = interval_hours\n\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def auto_scaling(self, auto_scaling):\n\n self.container['auto_scaling'] = auto_scaling",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc"
] |
[
"0.82561743",
"0.75433207",
"0.7293663",
"0.6517002",
"0.6411876",
"0.6182385",
"0.61311543",
"0.59943694",
"0.5993539",
"0.59688723",
"0.59505665",
"0.59278333",
"0.5866863",
"0.5812482",
"0.56214845",
"0.5597126",
"0.5546318",
"0.5526554",
"0.5516587",
"0.550821",
"0.54804605",
"0.5473573",
"0.5358035",
"0.5333033",
"0.5322303",
"0.52746683",
"0.5265307",
"0.5248805",
"0.5243292",
"0.5221047"
] |
0.7946528
|
1
|
Update azure monitor profile for the ManagedCluster object.
|
def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:
self._ensure_mc(mc)
# read the original value passed by the command
ksm_metric_labels_allow_list = self.context.raw_param.get("ksm_metric_labels_allow_list")
ksm_metric_annotations_allow_list = self.context.raw_param.get("ksm_metric_annotations_allow_list")
if ksm_metric_labels_allow_list is None:
ksm_metric_labels_allow_list = ""
if ksm_metric_annotations_allow_list is None:
ksm_metric_annotations_allow_list = ""
if self.context.get_enable_azure_monitor_metrics():
if mc.azure_monitor_profile is None:
mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()
mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)
mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long
metric_labels_allowlist=str(ksm_metric_labels_allow_list),
metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))
if self.context.get_disable_azure_monitor_metrics():
if mc.azure_monitor_profile is None:
mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()
mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)
if (
self.context.raw_param.get("enable_azure_monitor_metrics") or
self.context.raw_param.get("disable_azure_monitor_metrics")
):
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(
self.cmd,
self.context.get_subscription_id(),
self.context.get_resource_group_name(),
self.context.get_name(),
self.context.get_location(),
self.__raw_parameters,
self.context.get_disable_azure_monitor_metrics(),
False)
return mc
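
# A small, self-contained sketch of two details in update_azure_monitor_profile above:
# the kube-state-metrics allow-lists default to empty strings when the flags were not
# passed, and the prerequisites helper only runs when the caller explicitly enabled or
# disabled Azure Monitor metrics in this invocation. monitor_metrics_decision below is
# an illustrative stand-in, not azure-cli code; only the raw_param key names mirror the
# function above.

def monitor_metrics_decision(raw_param: dict) -> dict:
    labels_allow = raw_param.get("ksm_metric_labels_allow_list")
    annotations_allow = raw_param.get("ksm_metric_annotations_allow_list")
    # None (flag not passed) collapses to an empty allow-list string.
    if labels_allow is None:
        labels_allow = ""
    if annotations_allow is None:
        annotations_allow = ""
    # Prerequisites are only worth running when metrics were explicitly toggled.
    run_prerequisites = bool(
        raw_param.get("enable_azure_monitor_metrics") or
        raw_param.get("disable_azure_monitor_metrics")
    )
    return {
        "metric_labels_allowlist": labels_allow,
        "metric_annotations_allow_list": annotations_allow,
        "run_prerequisites": run_prerequisites,
    }


if __name__ == "__main__":
    # No metrics flags at all: allow-lists collapse to "" and prerequisites are skipped.
    assert monitor_metrics_decision({}) == {
        "metric_labels_allowlist": "",
        "metric_annotations_allow_list": "",
        "run_prerequisites": False,
    }
    # Explicitly enabling metrics triggers the prerequisites step.
    assert monitor_metrics_decision({"enable_azure_monitor_metrics": True,
                                     "ksm_metric_labels_allow_list": "app,team"}) == {
        "metric_labels_allowlist": "app,team",
        "metric_annotations_allow_list": "",
        "run_prerequisites": True,
    }
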
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n # set intermediate\n self.context.set_intermediate(\"azuremonitormetrics_addon_enabled\", True, overwrite_exists=True)\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def __init__(__self__, *,\n metrics: Optional[pulumi.Input['ManagedClusterAzureMonitorProfileMetricsArgs']] = None):\n if metrics is not None:\n pulumi.set(__self__, \"metrics\", metrics)",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )",
"def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n profile = self.context.get_workload_identity_profile()\n if profile:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n mc.security_profile.workload_identity = profile\n\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def update_health_monitor(self, health_monitor, body=None):\r\n return self.put(self.health_monitor_path % (health_monitor), body=body)",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def test_update_health_monitor(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.UpdateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--timeout', '5'],\r\n {'timeout': '5', })",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def put(self, request, health_monitor_id):\n update_monitor(request)",
"def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_profile = None\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel:\n auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)\n mc.auto_upgrade_profile = auto_upgrade_profile\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n return mc",
"def update_sku(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Premium without LTS is ok (not vice versa)\n if self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Premium\"\n )\n\n if self.context.get_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Standard\"\n )\n\n if self.context.get_no_uptime_sla() or self.context.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_FREE:\n mc.sku = self.models.ManagedClusterSKU(\n name=\"Base\",\n tier=\"Free\"\n )\n return mc",
"def update_log_config(self, monitor_name, log_config):\n pass"
] |
[
"0.7277104",
"0.63934505",
"0.61648846",
"0.614551",
"0.61402935",
"0.6066223",
"0.5994208",
"0.5984191",
"0.5957272",
"0.5943673",
"0.5894648",
"0.5878133",
"0.5850033",
"0.57665926",
"0.57402784",
"0.5631679",
"0.5623526",
"0.5593438",
"0.55433184",
"0.54727024",
"0.54588145",
"0.54497594",
"0.5385347",
"0.53590405",
"0.53444624",
"0.53138417",
"0.5285443",
"0.52764785",
"0.5256779",
"0.52381"
] |
0.7853511
|
0
|
The overall controller used to update the default ManagedCluster profile. The completely updated ManagedCluster object will later be passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
|
def update_mc_profile_default(self) -> ManagedCluster:
    # check raw parameters
    # prompt y/n if no options are specified to ask the user whether to perform a reconcile operation
    self.check_raw_parameters()
    # fetch the ManagedCluster object
    mc = self.fetch_mc()
    # update agentpool profile by the agentpool decorator
    mc = self.update_agentpool_profile(mc)
    # update auto scaler profile
    mc = self.update_auto_scaler_profile(mc)
    # update tags
    mc = self.update_tags(mc)
    # attach or detach acr (add or delete role assignment for acr)
    self.process_attach_detach_acr(mc)
    # update sku (uptime sla)
    mc = self.update_sku(mc)
    # update outbound type
    mc = self.update_outbound_type_in_network_profile(mc)
    # update load balancer profile
    mc = self.update_load_balancer_profile(mc)
    # update nat gateway profile
    mc = self.update_nat_gateway_profile(mc)
    # update disable/enable local accounts
    mc = self.update_disable_local_accounts(mc)
    # update api server access profile
    mc = self.update_api_server_access_profile(mc)
    # update windows profile
    mc = self.update_windows_profile(mc)
    # update network plugin settings
    mc = self.update_network_plugin_settings(mc)
    # update aad profile
    mc = self.update_aad_profile(mc)
    # update oidc issuer profile
    mc = self.update_oidc_issuer_profile(mc)
    # update auto upgrade profile
    mc = self.update_auto_upgrade_profile(mc)
    # update identity
    mc = self.update_identity(mc)
    # update addon profiles
    mc = self.update_addon_profiles(mc)
    # update defender
    mc = self.update_defender(mc)
    # update workload identity profile
    mc = self.update_workload_identity_profile(mc)
    # update storage profile
    mc = self.update_storage_profile(mc)
    # update azure keyvault kms
    mc = self.update_azure_keyvault_kms(mc)
    # update image cleaner
    mc = self.update_image_cleaner(mc)
    # update identity profile
    mc = self.update_identity_profile(mc)
    # update http proxy config
    mc = self.update_http_proxy_config(mc)
    # update workload autoscaler profile
    mc = self.update_workload_auto_scaler_profile(mc)
    # update kubernetes support plan
    mc = self.update_k8s_support_plan(mc)
    # update azure monitor metrics profile
    mc = self.update_azure_monitor_profile(mc)
    return mc
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fusion_api_update_hypervisor_cluster_profile(self, uri=None, body=None, api=None, headers=None):\n return self.cluster_profile.update(body=body, uri=uri, api=api, headers=headers)",
"def update_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.agent_pool_profiles:\n raise UnknownError(\n \"Encounter an unexpected error while getting agent pool profiles from the cluster in the process of \"\n \"updating agentpool profile.\"\n )\n\n agentpool_profile = self.agentpool_decorator.update_agentpool_profile_default(mc.agent_pool_profiles)\n mc.agent_pool_profiles[0] = agentpool_profile\n\n # update nodepool labels for all nodepools\n nodepool_labels = self.context.get_nodepool_labels()\n if nodepool_labels is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_labels = nodepool_labels\n\n # update nodepool taints for all nodepools\n nodepool_taints = self.context.get_nodepool_taints()\n if nodepool_taints is not None:\n for agent_profile in mc.agent_pool_profiles:\n agent_profile.node_taints = nodepool_taints\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def update_auto_scaler_profile(self, mc):\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n if cluster_autoscaler_profile is not None:\n # update profile (may clear profile with empty dictionary)\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def construct_mc_profile_default(self, bypass_restore_defaults: bool = False) -> ManagedCluster:\n # initialize the ManagedCluster object\n mc = self.init_mc()\n # DO NOT MOVE: remove defaults\n self._remove_defaults_in_mc(mc)\n\n # set up agentpool profile\n mc = self.set_up_agentpool_profile(mc)\n # set up misc direct mc properties\n mc = self.set_up_mc_properties(mc)\n # set up linux profile (for ssh access)\n mc = self.set_up_linux_profile(mc)\n # set up windows profile\n mc = self.set_up_windows_profile(mc)\n # set up service principal profile\n mc = self.set_up_service_principal_profile(mc)\n # add role assignment for vent subnet\n self.process_add_role_assignment_for_vnet_subnet(mc)\n # attach acr (add role assignment for acr)\n self.process_attach_acr(mc)\n # set up network profile\n mc = self.set_up_network_profile(mc)\n # set up addon profiles\n mc = self.set_up_addon_profiles(mc)\n # set up aad profile\n mc = self.set_up_aad_profile(mc)\n # set up oidc issuer profile\n mc = self.set_up_oidc_issuer_profile(mc)\n # set up api server access profile and fqdn subdomain\n mc = self.set_up_api_server_access_profile(mc)\n # set up identity\n mc = self.set_up_identity(mc)\n # set up identity profile\n mc = self.set_up_identity_profile(mc)\n # set up auto upgrade profile\n mc = self.set_up_auto_upgrade_profile(mc)\n # set up auto scaler profile\n mc = self.set_up_auto_scaler_profile(mc)\n # set up sku\n mc = self.set_up_sku(mc)\n # set up extended location\n mc = self.set_up_extended_location(mc)\n # set up node resource group\n mc = self.set_up_node_resource_group(mc)\n # set up defender\n mc = self.set_up_defender(mc)\n # set up workload identity profile\n mc = self.set_up_workload_identity_profile(mc)\n # set up storage profile\n mc = self.set_up_storage_profile(mc)\n # set up azure keyvalut kms\n mc = self.set_up_azure_keyvault_kms(mc)\n # set up image cleaner\n mc = self.set_up_image_cleaner(mc)\n # set up http proxy config\n mc = self.set_up_http_proxy_config(mc)\n # set up workload autoscaler profile\n mc = self.set_up_workload_auto_scaler_profile(mc)\n\n # setup k8s support plan\n mc = self.set_up_k8s_support_plan(mc)\n # set up azure monitor metrics profile\n mc = self.set_up_azure_monitor_profile(mc)\n # DO NOT MOVE: keep this at the bottom, restore defaults\n if not bypass_restore_defaults:\n mc = self._restore_defaults_in_mc(mc)\n return mc",
"def _0_cluster_profile(self, _0_cluster_profile):\n\n self.__0_cluster_profile = _0_cluster_profile",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def management_cluster(self) -> pulumi.Input['PrivateCloudManagementClusterArgs']:\n return pulumi.get(self, \"management_cluster\")",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def management_cluster(self) -> pulumi.Output['outputs.PrivateCloudManagementCluster']:\n return pulumi.get(self, \"management_cluster\")",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def reload(self):\n cluster_kubeconfig = self.ocp.cluster_kubeconfig\n self.data = self.get()\n self.__init__(**self.data)\n self.ocp.cluster_kubeconfig = cluster_kubeconfig",
"def management_cluster(self) -> Optional[pulumi.Input['PrivateCloudManagementClusterArgs']]:\n return pulumi.get(self, \"management_cluster\")",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)",
"def Update(self, controller):\n pass",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if not mc.network_profile:\n raise UnknownError(\n \"Encounter an unexpected error while getting network profile from the cluster in the process of \"\n \"updating its load balancer profile.\"\n )\n outbound_type = self.context.get_outbound_type()\n if outbound_type and outbound_type != CONST_OUTBOUND_TYPE_LOAD_BALANCER:\n mc.network_profile.load_balancer_profile = None\n else:\n load_balancer_managed_outbound_ip_count = self.context.get_load_balancer_managed_outbound_ip_count()\n load_balancer_managed_outbound_ipv6_count = self.context.get_load_balancer_managed_outbound_ipv6_count()\n load_balancer_outbound_ips = self.context.get_load_balancer_outbound_ips()\n load_balancer_outbound_ip_prefixes = self.context.get_load_balancer_outbound_ip_prefixes()\n load_balancer_outbound_ports = self.context.get_load_balancer_outbound_ports()\n load_balancer_idle_timeout = self.context.get_load_balancer_idle_timeout()\n # In the internal function \"_update_load_balancer_profile\", it will check whether the provided parameters\n # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will\n # remain unchanged.\n mc.network_profile.load_balancer_profile = _update_load_balancer_profile(\n managed_outbound_ip_count=load_balancer_managed_outbound_ip_count,\n managed_outbound_ipv6_count=load_balancer_managed_outbound_ipv6_count,\n outbound_ips=load_balancer_outbound_ips,\n outbound_ip_prefixes=load_balancer_outbound_ip_prefixes,\n outbound_ports=load_balancer_outbound_ports,\n idle_timeout=load_balancer_idle_timeout,\n profile=mc.network_profile.load_balancer_profile,\n models=self.models.load_balancer_models)\n return mc",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()\n mc.auto_scaler_profile = cluster_autoscaler_profile\n return mc",
"def update_network_plugin_settings(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n network_plugin_mode = self.context.get_network_plugin_mode()\n if network_plugin_mode:\n mc.network_profile.network_plugin_mode = network_plugin_mode\n\n (\n pod_cidr,\n _,\n _,\n _,\n _\n ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()\n\n network_dataplane = self.context.get_network_dataplane()\n if network_dataplane:\n mc.network_profile.network_dataplane = network_dataplane\n\n if pod_cidr:\n mc.network_profile.pod_cidr = pod_cidr\n return mc",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n api_server_access_profile = None\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n enable_private_cluster = self.context.get_enable_private_cluster()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n private_dns_zone = self.context.get_private_dns_zone()\n if api_server_authorized_ip_ranges or enable_private_cluster:\n api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(\n authorized_ip_ranges=api_server_authorized_ip_ranges,\n enable_private_cluster=True if enable_private_cluster else None,\n enable_private_cluster_public_fqdn=False if disable_public_fqdn else None,\n private_dns_zone=private_dns_zone\n )\n mc.api_server_access_profile = api_server_access_profile\n\n fqdn_subdomain = self.context.get_fqdn_subdomain()\n mc.fqdn_subdomain = fqdn_subdomain\n return mc"
] |
[
"0.597643",
"0.58742917",
"0.5851303",
"0.5781573",
"0.5772862",
"0.576339",
"0.57513094",
"0.56952155",
"0.56308115",
"0.56079274",
"0.55892634",
"0.5554325",
"0.5532541",
"0.54891205",
"0.5482881",
"0.5433022",
"0.5402849",
"0.54027134",
"0.5315797",
"0.5315068",
"0.5307258",
"0.5298067",
"0.5295921",
"0.52507627",
"0.52441734",
"0.5237026",
"0.52339053",
"0.520247",
"0.51887393",
"0.51808804"
] |
0.7316749
|
0
|
Helper function to check if postprocessing is required after sending a PUT request to create the cluster.
|
def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool:
    # some addons require post cluster creation role assignment
    monitoring_addon_enabled = self.context.get_intermediate("monitoring_addon_enabled", default_value=False)
    ingress_appgw_addon_enabled = self.context.get_intermediate("ingress_appgw_addon_enabled", default_value=False)
    virtual_node_addon_enabled = self.context.get_intermediate("virtual_node_addon_enabled", default_value=False)
    enable_managed_identity = check_is_msi_cluster(mc)
    attach_acr = self.context.get_attach_acr()
    if (
        monitoring_addon_enabled or
        ingress_appgw_addon_enabled or
        virtual_node_addon_enabled or
        (enable_managed_identity and attach_acr)
    ):
        return True
    return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool:\n # some addons require post cluster creation role assigment\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n need_grant_vnet_permission_to_cluster_identity = self.context.get_intermediate(\n \"need_post_creation_vnet_permission_granting\", default_value=False\n )\n\n if (\n monitoring_addon_enabled or\n ingress_appgw_addon_enabled or\n virtual_node_addon_enabled or\n azuremonitormetrics_addon_enabled or\n (enable_managed_identity and attach_acr) or\n need_grant_vnet_permission_to_cluster_identity\n ):\n return True\n return False",
"def is_bootstrapped(self):\n\n # Attempt to bootstrap without providing any of the required fields, and inspect the exception\n try:\n response = self._connection.post(\"/deployment/new\", json={})\n raise TransportError(\"POST {} to /deployment/new should have raised an exception, but didn't\", response)\n except ValueError as e:\n if e.args[0] == 400:\n # The server is willing to accept correct field values to bootstrap with, so isn't bootstrapped yet.\n return False\n if e.args[0] == 403:\n # The server is no longer willing to accept POSTs to /deployment/new, because it's already bootstrapped.\n return True\n raise\n raise TransportError(response)",
"def is_put_or_post(split_request_header: list) -> bool:\n if split_request_header[0] == \"PUT\":\n return True\n elif split_request_header[0] == \"POST\":\n return True\n\n return False",
"def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False",
"def is_catastrophic(self):\n if (self.request.method.upper() == 'PUT'\n and 'PLURAL_PUT' not in self.http_methods) \\\n or (self.request.method.upper() == 'DELETE'\n and 'PLURAL_DELETE' not in self.http_methods):\n return True\n return False",
"def is_valid(self):\n return self.post_processor in PostProcessor.valid_list()",
"def ispostSupportedClass(cls, tmpcls, session=None):\n if session is not None:\n cls.getclsoptions(tmpcls, session)\n if 'POST' in optionsdict[tmpcls]['OPTIONS']:\n return True\n return False",
"def post(self):\n # by default post is not supported\n return False",
"def skip(self):\n if not self.helper_view.newDataOnly():\n return False\n\n if self.request.steps[-1].startswith(\"++add++\"):\n return False\n if self.request.method != \"PATCH\":\n # restapi calls\n return False\n return True",
"def post(self):\n self.not_supported()",
"def post_k8s_updating_tasks(post_tasks=None):\n if post_tasks:\n for task in post_tasks:\n if callable(task):\n task()\n LOG.debug('Running mandatory tasks after updating proccess has finished.')",
"def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')",
"def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], r.form['host_id'], r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST",
"def is_modify_required(self, obj_fs, cap_unit):\n try:\n to_update = {}\n obj_fs = obj_fs.update()\n description = self.module.params['description']\n\n if description is not None and description != obj_fs.description:\n to_update.update({'description': description})\n\n size = self.module.params['size']\n if size and cap_unit:\n size_byte = int(utils.get_size_bytes(size, cap_unit))\n if size_byte < obj_fs.size_total:\n self.module.fail_json(msg=\"Filesystem size can be \"\n \"expanded only\")\n elif size_byte > obj_fs.size_total:\n to_update.update({'size': size_byte})\n\n tiering_policy = self.module.params['tiering_policy']\n if tiering_policy and self.get_tiering_policy_enum(\n tiering_policy) != obj_fs.tiering_policy:\n to_update.update({'tiering_policy':\n self.get_tiering_policy_enum(\n tiering_policy)})\n\n is_thin = self.module.params['is_thin']\n if is_thin is not None and is_thin != obj_fs.is_thin_enabled:\n to_update.update({'is_thin': is_thin})\n\n data_reduction = self.module.params['data_reduction']\n if data_reduction is not None and \\\n data_reduction != obj_fs.is_data_reduction_enabled:\n to_update.update({'is_compression': data_reduction})\n\n access_policy = self.module.params['access_policy']\n if access_policy and self.get_access_policy_enum(\n access_policy) != obj_fs.access_policy:\n to_update.update({'access_policy':\n self.get_access_policy_enum(access_policy)})\n\n locking_policy = self.module.params['locking_policy']\n if locking_policy and self.get_locking_policy_enum(\n locking_policy) != obj_fs.locking_policy:\n to_update.update({'locking_policy':\n self.get_locking_policy_enum(\n locking_policy)})\n\n snap_sch = obj_fs.storage_resource.snap_schedule\n\n if self.snap_sch_id is not None:\n if self.snap_sch_id == \"\":\n if snap_sch and snap_sch.id != self.snap_sch_id:\n to_update.update({'is_snap_schedule_paused': False})\n elif snap_sch is None or snap_sch.id != self.snap_sch_id:\n to_update.update({'snap_sch_id': self.snap_sch_id})\n\n smb_properties = self.module.params['smb_properties']\n if smb_properties:\n sync_writes_enabled = \\\n smb_properties['is_smb_sync_writes_enabled']\n oplocks_enabled = \\\n smb_properties['is_smb_op_locks_enabled']\n notify_on_write = \\\n smb_properties['is_smb_notify_on_write_enabled']\n notify_on_access = \\\n smb_properties['is_smb_notify_on_access_enabled']\n notify_on_change_dir_depth = \\\n smb_properties['smb_notify_on_change_dir_depth']\n\n if sync_writes_enabled is not None and \\\n sync_writes_enabled != obj_fs.is_cifs_sync_writes_enabled:\n to_update.update(\n {'is_cifs_sync_writes_enabled': sync_writes_enabled})\n\n if oplocks_enabled is not None and \\\n oplocks_enabled != obj_fs.is_cifs_op_locks_enabled:\n to_update.update(\n {'is_cifs_op_locks_enabled': oplocks_enabled})\n\n if notify_on_write is not None and \\\n notify_on_write != \\\n obj_fs.is_cifs_notify_on_write_enabled:\n to_update.update(\n {'is_cifs_notify_on_write_enabled': notify_on_write})\n\n if notify_on_access is not None and \\\n notify_on_access != \\\n obj_fs.is_cifs_notify_on_access_enabled:\n to_update.update(\n {'is_cifs_notify_on_access_enabled':\n notify_on_access})\n\n if notify_on_change_dir_depth is not None and \\\n notify_on_change_dir_depth != \\\n obj_fs.cifs_notify_on_change_dir_depth:\n to_update.update(\n {'cifs_notify_on_change_dir_depth':\n notify_on_change_dir_depth})\n if len(to_update) > 0:\n return to_update\n else:\n return None\n\n except Exception as e:\n errormsg = \"Failed to determine if FileSystem id: {0}\" \\\n 
\" modification required with error {1}\".format(obj_fs.id,\n str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"def test_kyc_put_request_legal(self):\n pass",
"def _should_initialize_check_run(self, payload):\n action = payload.get('action')\n return action in self.initialize_actions or self.initialize_actions is None",
"def pre_k8s_updating_tasks(post_tasks, params_to_preserve):\n # pylint: disable-msg=broad-except\n rc = 0\n LOG.debug('Running mandatory tasks before update proccess start.')\n try:\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n except Exception as e:\n LOG.error('Loading kube_apiserver config [Detail %s].', e)\n return 1\n\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n advertise_address = m.group(1)\n LOG.debug(' advertise_address = %s', advertise_address)\n params_to_preserve['advertise-address'] = advertise_address\n\n def _post_task_update_advertise_address():\n \"\"\"This method will be executed in right after control plane has been initialized and it\n will update advertise_address in manifests/kube-apiserver.yaml to use mgmt address\n instead of oam address due to https://bugs.launchpad.net/starlingx/+bug/1900153\n \"\"\"\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)\n\n def _post_task_security_context():\n cmd = [\"sed\", \"-i\", \"/securityContext:/,/type: RuntimeDefault/d\", KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)\n\n post_tasks.append(_post_task_update_advertise_address)\n post_tasks.append(_post_task_security_context)\n\n return rc",
"def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()",
"def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True",
"def is_prepost(*args):\n return _ida_hexrays.is_prepost(*args)",
"def test_client_can_do_put_request(self):\n response = self.httpbin_4.test_requests_put_method()\n self.assertEqual(response.request.method, 'PUT')\n self.assertEqual(response.status_code, 200)",
"def check_condor(self):\n\n self.cluster=self.info['mw_run']['1']\n self.norm_with_cross=self.info['mw_run']['4']\n self.condor_req=self.info['mw_run']['11']\n\n #type is automaticaly updated now\n #self.cluster=int(condor)\n #if norm_with_cross==\"F\":\n # self.norm_with_cross=0\n #else:\n # self.norm_with_cross=1",
"def required():\n kernel = __salt__['grains.item']('os') # pylint: disable=E0602,E0603\n\n # Disable rebooting for HDP clusters until that works reliably\n hadoop_distro = __salt__['pillar.get']('hadoop.distro') # pylint: disable=E0602,E0603\n if hadoop_distro == 'HDP':\n return False\n\n if kernel['os'] == \"CentOS\" or kernel['os'] == \"RedHat\":\n try:\n current_version = __salt__['cmd.run']('uname -r') # pylint: disable=E0602,E0603\n latest_version = __salt__['cmd.run']('rpm -q --last kernel') # pylint: disable=E0602,E0603\n latest_version = latest_version.split(\" \")\n latest_version = [\n version for version in latest_version if 'kernel' in version]\n latest_version = str(latest_version[0]).strip('kernel-') # pylint: disable=E1310\n if current_version == latest_version:\n return False\n except: # pylint: disable=W0702\n return False\n return True\n\n return __salt__['file.file_exists']('/var/run/reboot-required') # pylint: disable=E0602,E0603",
"def prerequisites():\n if not 'T2_TOKEN' in os.environ:\n print(\"Error: Please supply T2_TOKEN as an environment variable.\")\n exit(1)\n if not 'T2_URL' in os.environ:\n print(\"Error: Please supply T2_URL as an environment variable.\")\n exit(1)\n if not os.path.isfile(\"/cluster.yaml\"):\n print(\"Error Please supply cluster definition as file in /cluster.yaml.\")\n exit(1)",
"def match_request(self, req):\n\n return req.method == 'POST' and req.path_info == '/bitbucketsync'",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def validate_on_put_request(self, data, **kwargs):\n if request.method == \"PUT\":\n if \"bio\" not in data or \"website\" not in data:\n raise ValidationError(\"Missing one or more fields.\")",
"def needs_update(self, context) -> bool:\n raise NotImplementedError",
"def _check_controller_state():\n chosts = pecan.request.dbapi.ihost_get_by_personality(\n constants.CONTROLLER)\n\n for chost in chosts:\n utils.is_host_state_valid_for_fs_resize(chost)\n\n return True"
] |
[
"0.6787382",
"0.6160552",
"0.5965819",
"0.5765838",
"0.54389656",
"0.52548474",
"0.5172896",
"0.51714987",
"0.5161744",
"0.5144229",
"0.50794834",
"0.50763243",
"0.50758445",
"0.5068631",
"0.506405",
"0.50525963",
"0.5025663",
"0.50229084",
"0.50158024",
"0.49857804",
"0.4980957",
"0.4976503",
"0.49752486",
"0.49740872",
"0.49688995",
"0.49546072",
"0.49546072",
"0.4947931",
"0.49326557",
"0.49307066"
] |
0.65956247
|
1
|
Generate metadata and store it in a YAML file with the given arguments.
|
def gen_metadata(args):
    # parse the BibTeX file and sort its entries by year, newest first
    with open(args.bibfile) as bibfile:
        bib_db = BibTexParser(common_strings=True).parse_file(bibfile)
    entries = sorted(bib_db.entries, key=lambda x: x['year'], reverse=True)
    # refresh the per-entry files, then dump the annotations to the metadata YAML file
    for entry in entries:
        update_file(entry)
    annotations = [entry_to_annotation(entry, args.PI) for entry in entries]
    with open(args.metadata, 'w') as stream:
        yaml.dump(annotations, stream, width=192, default_flow_style=False)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_create_yaml(self):\n pass",
"def main(args):\n metafiles = []\n verbose = args.verbose\n\n if (args.metalist is not None):\n for listfile in args.metalist:\n metafiles.extend(addmeta.list_from_file(listfile))\n\n if (args.metafiles is not None):\n metafiles.extend(args.metafiles)\n\n if verbose: print(\"metafiles: \",\" \".join([str(f) for f in metafiles]))\n\n addmeta.find_and_add_meta(args.files, metafiles)",
"def task_generate_virtual_metadata():\n script = Path(__file__).parents[0] / \"generate_metadata_virtual_experiment.py\"\n metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script],\n \"verbosity\": 2, # show stdout\n \"targets\": [*metadata_files],\n 'uptodate': [len(list(metadata_files)) > 0],\n }",
"def assemble(metadata_file):\n\n def read(file):\n with open(file) as yaml:\n return load(yaml.read())\n\n def add_name(info):\n info['name'] = slugify(info['title'], separator='_')\n return info\n\n def get_files(filetype):\n filename = metadata_file.replace('metadata', filetype)\n folder = dirname(metadata_file)\n schema_files_pattern = join(folder, filename)\n return glob(schema_files_pattern)\n\n descriptor = add_name(read(metadata_file))\n resources = [add_name(read(file)) for file in get_files('resource')]\n model = get_files('model')\n\n descriptor['resources'] = resources\n if model and len(model) == 1:\n descriptor['model'] = model.pop()\n\n return DataPackage(descriptor)",
"def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))",
"def register_metadata(metadata_file, config):\n metadata_list = []\n import sys\n if metadata_file in sys.argv:\n return config\n try:\n with open(metadata_file, \"r\") as fh:\n reader = csv.DictReader(fh.readlines())\n metadata_list = [row for row in reader]\n run2sample = {row[\"Run\"]:row[\"SampleName\"] for row in metadata_list}\n config_default = {\n 'bio.ngs.settings' : {\n 'sampleinfo' : metadata_file\n },\n 'bio.ngs.tools.sratools': {\n '_datadir': os.path.dirname(metadata_file),\n '_run2sample' : run2sample,\n '_metadata' : metadata_list\n },\n }\n update_config(config_default, config)\n config = config_default\n \n except Exception:\n raise Exception(\"\"\"\n\n no metadata file '{metadata}' found\n\n please initiate analysis by running 'snakemake {metadata}'\n\n \"\"\".format(metadata=metadata_file))\n return config",
"def handle(self, *args, **options):\n file = StringIO()\n call_command(\"generateschema\", stdout=file)\n file.seek(0)\n document = yaml.load(file, Loader=yaml.FullLoader)\n document.update({\n \"externalDocs\": {\n \"description\": \"Check us out on GitHub\",\n \"url\": \"https://github.com/ractf\",\n },\n \"info\": {\n \"title\": \"RACTF Core\",\n \"version\": os.popen(\"git rev-parse HEAD\").read().strip()[:8],\n \"description\": \"The API for RACTF.\",\n \"contact\": {\n \"name\": \"Support\",\n \"email\": \"[email protected]\",\n \"url\": \"https://reallyawesome.atlassian.net/servicedesk/customer/portals\",\n },\n \"x-logo\": {\n \"url\": \"https://www.ractf.co.uk/brand_assets/combined/wordmark_white.svg\",\n \"altText\": \"RACTF Logo\",\n },\n }\n })\n print(yaml.dump(document))",
"def generate(self):\n logger.info(\"Starting yml generation..\")\n if not self.is_generatable_file:\n logger.error(\n f\"[red]Not running file {self.filename} without metadata collector.[/red]\"\n )\n return\n # Collect the wrapped functions with the details.\n self.collect_functions()\n # Make sure when they are ran, only collecting data will be performed.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(True)\n # Run the functions and by that, collect the data.\n self.run_functions()\n # Write the yml file according to the collected details.\n self.extract_metadata()\n # Make sure the functions are back to normal running state.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(False)\n # Remove imports from file\n self.remove_collector_imports()",
"def cli(yamlfile, **args):\n print(ShExGenerator(yamlfile, **args).serialize(**args))",
"def createSpecWithMetadataInYamlAndUserdata(self):\n metadataYamlFilePath = os.path.join(os.path.dirname(\n os.path.realpath(__file__)),\n 'sample_metadata.yaml')\n userdataFilePath = os.path.join(os.path.dirname(\n os.path.realpath(__file__)),\n 'sample_userdata')\n with open(metadataYamlFilePath, \"r\") as fp:\n self.metadata = fp.read().rstrip('\\n')\n with open(userdataFilePath, \"r\") as fp:\n self.userdata = fp.read().rstrip('\\n')\n self.createCloudinitDataSpec('specWithMetadataInYamlAndUserdata',\n 'linux cloud-init data customization spec'\n 'with metadata in yaml format and '\n 'userdata')",
"def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }",
"def setup_from_args(ma, args):\n ma.out_file = ma.fix_path(args.output)\n ma.deck_id = ma.dm.id(args.deck)\n ma.model_name = _(u'Subtitles ({})').format(args.deck)\n if args.language_names:\n ma.language_names = args.language_names\n if args.Japanese:\n ma.japanese = True\n ma.use_readings = True\n if args.Chinese:\n ma.chinese = True\n ma.use_readings = True\n ma.subtitle_files = args.subtitles",
"def task_generate_virtual_samples():\n metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n data_files = Path(__file__).parent.glob('*_data.yaml')\n\n script = Path(__file__).parents[0] / \"virtual_experiment.py\"\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script, *metadata_files],\n \"verbosity\": 2, # show stdout\n \"targets\": [*data_files],\n \"setup\": [\"generate_virtual_metadata\"],\n \"clean\": [clean_targets]\n }",
"def generate(filename, output_path):\n with open(filename) as yaml_file:\n yaml_str = yaml_file.read()\n repository = YamlRepository(yaml_str)\n podcast_generate_uc = PodcastGenerate(repository, output_path)\n podcast_generate_uc.execute()\n click.echo(\"Done!\")",
"def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict",
"def main() -> None:\n\n cache: Dict[str, Any] = {}\n datadir = util.get_abspath(sys.argv[1])\n for yaml_path in glob.glob(os.path.join(datadir, \"*.yaml\")):\n with open(yaml_path) as yaml_stream:\n cache_key = os.path.relpath(yaml_path, datadir)\n cache[cache_key] = yaml.load(yaml_stream)\n\n cache_path = os.path.join(datadir, \"yamls.pickle\")\n with open(cache_path, \"wb\") as cache_stream:\n pickle.dump(cache, cache_stream)",
"def main(argv: List[str]) -> None:\n parser = utils.ArgumentParser()\n parser.add_ecore_root_argument(\"-r\", \"--root\")\n\n parser.parse_args(argv[1:])\n generate_yaml()",
"def main(cls, **kwargs):\n try:\n import file_transformer\n except Exception as e:\n sys.exit(\"{}\\nSee https://github.com/benkehoe/file-transformer\".format(e))\n \n def loader(input_stream, args):\n return yaml.load(input_stream)\n \n def processor(input, args):\n transform = cls(input, vars(args))\n transform.apply()\n return transform.template\n \n def dumper(output, output_stream, args):\n yaml.dump(output, output_stream)\n \n return file_transformer.main(processor, loader, dumper, **kwargs)",
"def compose_metadata(cmd, solo_flags, arg_flags, infile, outfile, full_cmd):\n return {\n 'outfile': os.path.split(outfile)[-1],\n 'cmd': cmd,\n 'infile': infile,\n 'solo_flags': solo_flags,\n 'arg_flags': {k.lstrip('-'):v for k, v in arg_flags},\n 'full_cmd': full_cmd\n }",
"def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)",
"def parse_args():\n parser = argparse.ArgumentParser(\"generate_scenarios.py\")\n add_arg = parser.add_argument\n add_arg(\"config\", nargs=\"?\", default=\"config.yaml\")\n # add_arg('-d', '--distributed', action='store_true')\n add_arg(\"-v\", \"--verbose\", action=\"store_true\")\n # parameters which override the YAML file, if needed\n #\n return parser.parse_args()",
"def parse_args(args):\n\n parser = argparse.ArgumentParser(description=\"Add meta data to one or more netCDF files\")\n\n parser.add_argument(\"-m\",\"--metafiles\", help=\"One or more meta-data files in YAML format\", action='append')\n parser.add_argument(\"-l\",\"--metalist\", help=\"File containing a list of meta-data files\", action='append')\n parser.add_argument(\"-v\",\"--verbose\", help=\"Verbose output\", action='store_true')\n parser.add_argument(\"files\", help=\"netCDF files\", nargs='+')\n\n return parser.parse_args(args)",
"def cli(yamlfile, **args):\n print(LogicProgramGenerator(yamlfile, **args).serialize(**args))",
"def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)",
"def create_meta(prefix, dist, info_dir, extra_info):\n # read info/index.json first\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n # add extra info\n meta.update(extra_info)\n # write into <prefix>/conda-meta/<dist>.json\n meta_dir = join(prefix, 'conda-meta')\n if not isdir(meta_dir):\n os.makedirs(meta_dir)\n with open(join(meta_dir, 'history'), 'w') as fo:\n fo.write('')\n with open(join(meta_dir, dist + '.json'), 'w') as fo:\n json.dump(meta, fo, indent=2, sort_keys=True)",
"def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)",
"def populateMeta(self, *args):\n meta = self._getAllMeta()\n if not meta:\n raise MetaReadError(\"Error Reading Image MetaData, has image finished copying?\")\n else:\n self.exifKeys = self._getAllMetaKeys(meta)\n for key in self.exifKeys:\n if key == self._getExifKey_TimeCode():\n tag = meta[self._getExifKey_TimeCode()]\n self.startTimecode = tag.raw_value\n self._splitTimecode()\n \n if args:\n for arg in args:\n try:\n lTag = meta[arg]\n self.__dict__[arg.split('.')[1] + '_' + arg.split('.')[2]] = lTag.raw_value\n except:\n print 'could not get meta for tag ', arg",
"def write_metadata(file_name, metadata_dict, category='',\n datatype=\"inventory\", parameters=None):\n if (datatype == \"inventory\") or (datatype == \"source\"):\n meta = set_stewi_meta(file_name, stewiformat=category)\n if datatype == 'inventory':\n meta.tool_meta = {\"parameters\": parameters,\n \"sources\": metadata_dict}\n else:\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)\n elif datatype == \"validation\":\n file = (paths.local_path / 'validation' /\n f'{file_name}_validationset_metadata.json')\n with file.open('w') as fi:\n fi.write(json.dumps(metadata_dict, indent=4))",
"def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):",
"def load_multiple_yaml_file(self, args):\n sList = []\n for file in args:\n with open (file , \"r\") as stream:\n sList.append(stream.read())\n fString = ''\n for s in sList:\n fString = fString + \"\\n\" + s\n self.data = yaml.load(fString)"
] |
[
"0.6481894",
"0.6460003",
"0.63430345",
"0.6327105",
"0.61623716",
"0.6149009",
"0.61312765",
"0.612239",
"0.59599483",
"0.5936118",
"0.5773244",
"0.5772877",
"0.5702008",
"0.57003224",
"0.5686346",
"0.5672096",
"0.5651589",
"0.5533003",
"0.55324817",
"0.5531138",
"0.55146885",
"0.5513317",
"0.55129135",
"0.5505007",
"0.550047",
"0.5487816",
"0.54752594",
"0.5469851",
"0.5459131",
"0.5446542"
] |
0.71669436
|
0
|
To parse the command line and process the paper organization task.
|
def organize_bib():
parser = argparse.ArgumentParser(description='Organize papers')
parser.add_argument('bibfile', help='The bibtex file to be processed.')
subparsers = parser.add_subparsers(help='Available commands')
help_tmpl = 'Create a YAML file describing metadata for the organization tasks.'
parser_tmpl = subparsers.add_parser('genmetadata', help=help_tmpl)
parser_tmpl.add_argument('--PI', default='Qi, Fei',
help='The name of the PI.')
parser_tmpl.add_argument('--metadata', '-m',
help='Output metadata in a YAML file.')
parser_tmpl.set_defaults(func=gen_metadata)
    help_patch = 'Create attachments for uploading to the NSFC website.'
parser_attach = subparsers.add_parser('genattach', help=help_patch)
parser_attach.add_argument('--metadata', '-m',
help='Metadata in YAML format specifying the task.')
parser_attach.add_argument('--script', '-s',
help='Bash script for creating tasks.')
parser_attach.set_defaults(func=gen_attachments)
help_extract = 'Extract bib items based on a given aux file.'
parser_extract = subparsers.add_parser('extract', help=help_extract)
parser_extract.add_argument('--auxfile', '-x',
help='AUX file created by LaTeX.')
parser_extract.add_argument('--output', '-o',
help='Output of cited references in BibTeX format.')
parser_extract.set_defaults(func=extract_bibtex)
help_simplify = 'Simplify a BibTeX file.'
parser_simplify = subparsers.add_parser('simplify', help=help_simplify)
parser_simplify.add_argument('--bib-journal', '-b',
help='BibTeX of canonical journal/conference titles.')
parser_simplify.add_argument('--output', '-o',
help='Output of simplified BibTeX.')
parser_simplify.set_defaults(func=simplify_bibtex)
args = parser.parse_args()
# print(args)
args.func(args)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()",
"def process_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"srcDir\", type=str, help=\"Directory containing Unit Hydrograph grids to be aggregated\")\n parser.add_argument(\"gridFile\", type=str, help=\"Input netCDF target grid\")\n parser.add_argument(\"--remapDir\", type=str, help=\"Directory containing Output Unit Hydrograph grids\")\n parser.add_argument(\"--aggDir\", type=str, help=\"Directory where to store aggregated files (before remap)\")\n parser.add_argument(\"--inPrefix\", type=str, help=\"Input Unit Hydrograph File Prefix (default=UH_)\",default='UH_')\n parser.add_argument(\"--outPrefix\", type=str, help=\"Output Unit Hydrograph File Prefix (default=Agg_UH_)\", default=\"Agg_UH_\")\n parser.add_argument(\"--time\", type=str, help=\"Input Unit Hydrograph time variable name (default=time)\",default='time')\n parser.add_argument(\"--lon\", type=str, help=\"Input Unit Hydrograph longitude variable name (default=lon)\",default='lon')\n parser.add_argument(\"--lat\", type=str, help=\"Input Unit Hydrograph latitude variable name (default=lat)\",default='lat')\n parser.add_argument(\"--fraction\", type=str, help=\"Input Unit Hydrograph fraction variable name (default=fraction)\",default='fraction')\n parser.add_argument(\"--unit_hydrograph\",type=str, help=\"Input unit hydrograph variable name (default=unit_hydrograph)\",default='unit_hydrograph')\n parser.add_argument(\"--xc\", type=str, help=\"Input target grid longitude variable (default=xc)\",default='xc')\n parser.add_argument(\"--yc\", type=str, help=\"Input target grid latitude variable (default=yc)\",default='yc') \n parser.add_argument(\"--testAgg\",help=\"Do a test aggregation, where all inpoint points are aggregated into one file, remapping can be done afterwards using the --remap flag\",action=\"store_true\")\n parser.add_argument(\"--cdoDebug\",help=\"Enable CDO debuging (prings each step to screen)\",action=\"store_true\")\n parser.add_argument(\"--cdoForce\",help=\"Enable CDO force output (will overwrite existing files during remap)\",action=\"store_true\")\n parser.add_argument(\"--verbose\",help=\"Make script verbose\",action=\"store_true\")\n parser.add_argument(\"--remap\",help=\"Remap the aggregated Unit Hydrographs to outDir and put the aggregated files in the tempDir\",action='store_true')\n parser.add_argument(\"--agg\",help=\"Aggregate the input files onto the targetGrid (gridFile)\",action='store_true')\n parser.add_argument(\"--fill_value\",type=float,help=\"value to use as masked value (default=9.96920996839e+36)\",default = 9.96920996839e+36)\n parser.add_argument(\"--pad\",type=int,help=\"Set number of empty cells to include around each aggregated basin (default=10)\",default=10)\n parser.add_argument(\"--resolution\",type=float,help=\"Set resolution of input Unit Hydrographs (default=1/16.)\",default=1/16.)\n parser.add_argument(\"--clean\",help=\"Clean up aggregated Unit Hydrograph grids if remapping\", action='store_true')\n parser.add_argument(\"--dryrun\",help=\"Do the mapping between the source and target grid based on the files in the input directory, return the performance stats for the run\", action='store_true')\n args = parser.parse_args()\n\n options = {}\n paths = {}\n # parse the basics\n Rvars = (args.time,args.lon,args.lat,args.fraction,args.unit_hydrograph)\n Cvars = (args.yc,args.xc)\n paths['srcDir'] = args.srcDir\n paths['gridFile'] = args.gridFile\n\n if args.aggDir:\n paths['aggDir'] = args.aggDir\n else:\n paths['aggDir'] = 
os.path.join(paths['srcDir'],'../aggregated/')\n if not os.path.exists(paths['aggDir']):\n os.makedirs(paths['aggDir'])\n\n options['verbose'] = args.verbose\n options['fill_value'] = args.fill_value\n options['pad'] = args.pad\n options['resolution'] = args.resolution\n options['inPrefix'] = args.inPrefix\n options['outPrefix'] = args.outPrefix\n options['dryrun'] = args.dryrun\n options['testAgg'] = args.testAgg\n options['clean']=args.clean\n options['remap']=args.remap\n options['agg']=args.agg\n \n if options['remap']:\n cdo.debug=args.cdoDebug\n cdo.forceOutput=args.cdoForce\n if args.remapDir:\n paths['remapDir'] = args.remapDir\n else:\n paths['remapDir'] = os.path.join(paths['srcDir'],'../remaped/')\n if not os.path.exists(paths['remapDir']):\n os.makedirs(paths['remapDir'])\n print paths['remapDir'] \n\n return Rvars,Cvars,paths,options",
"def main():\n args = parse_args()\n process_args(args)",
"def main(args=None):\n parser = argparse.ArgumentParser()\n parser.set_defaults(func=None)\n\n subs = parser.add_subparsers(title='Commands')\n\n p_export = subs.add_parser(\n 'export',\n help='run dataset export commands to produce new parcels')\n p_export.add_argument('path', nargs='+', help='dataset dir path')\n p_export.add_argument('-p', '--parcel_id', nargs='?',\n help='parcel_id for new parcel '\n '(format: yyyy-mm-ddThhmmssZ) '\n '(defaults to current timestamp)')\n p_export.set_defaults(func=_export)\n\n p_init = subs.add_parser('init', help='initialize new dataset dirs')\n p_init.add_argument('path', nargs='+', help='dataset dir path')\n p_init.add_argument('-g', '--git', action='store_true',\n help='initialize a git repo')\n p_init.set_defaults(func=_init)\n\n p_process = subs.add_parser(\n 'process',\n help='run ingestion and exports, update metrics, '\n 'and perform cleaning, where needed')\n p_process.add_argument('path', nargs='+', help='dataset dir path')\n p_process.set_defaults(func=_process)\n\n p_report = subs.add_parser('report', help='summarize export activity')\n p_report.add_argument('path', nargs='+', help='dataset dir path')\n p_report.set_defaults(func=_report)\n\n p_reprocess_metrics = subs.add_parser('reprocess_metrics',\n help='update metrics for parcels')\n p_reprocess_metrics.add_argument(\n 'path', nargs='+', help='dataset dir path')\n p_reprocess_metrics.add_argument(\n '-p', '--parcel_id', nargs='?',\n help='only reprocess specific parcel (format: yyyy-mm-ddThhmmssZ)')\n p_reprocess_metrics.set_defaults(func=_reprocess_metrics)\n\n args = parser.parse_args(args)\n if not args.func:\n parser.print_help()\n return 1\n return args.func(args)",
"def readCommand( argv ): ## argv belongs to the 'sys'-library and can be called through sys.argv. The function reads the console's comand line argument and passes it to a variable like so: args = sys.argv[1:]\n from optparse import OptionParser ## Option Parser is a powerful library for passing command line options (an advanced args) if you like. It allows you to add options by defining attributes. \n usageStr = \"\"\" \n USAGE: python pacman.py <options> \n EXAMPLES: (1) python pacman.py\n - starts an interactive game\n (2) python pacman.py --layout smallClassic --zoom 2\n OR python pacman.py -l smallClassic -z 2\n - starts an interactive game on a smaller board, zoomed in\n \"\"\" \n parser = OptionParser(usageStr) ## This creates the Option Parser instance. It also passes the usageStr which functions as a little help-text for the user.\n\n ### In this section all the option strings are defined. Typically each option has one short option string and one long option string. For example the parser.add_option('-n'... has '-n' as short and '--numGames' as the long option string. Both have the same effect. The option argument will be the same and be saved as the variabme 'numGames'. \n parser.add_option('-n', '--numGames', dest='numGames', type='int', \n help=default('the number of GAMES to play'), metavar='GAMES', default=1) ## the syntax for the options is (based on the example in this line) --n 3. This means that the value '3' would be assigned to the variable numGames.\n parser.add_option('-l', '--layout', dest='layout',\n help=default('the LAYOUT_FILE from which to load the map layout'), #The instance -> 'options.layout' defines the layout_file from which to load the map layout; DEFAULT = medium_classic\n metavar='LAYOUT_FILE', default='mediumClassic')\n parser.add_option('-p', '--pacman', dest='pacman',\n help=default('the agent TYPE in the pacmanAgents module to use'), #The instance -> 'options.pacman' defines which of the agent TYPE in the pacmanAgents moduleto use.\n metavar='TYPE', default='KeyboardAgent')\n parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',\n help='Display output as text only', default=False)\n parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',\n help='Generate minimal output and no graphics', default=False)\n parser.add_option('-g', '--ghosts', dest='ghost',\n help=default('the ghost agent TYPE in the ghostAgents module to use'),\n metavar = 'TYPE', default='RandomGhost')\n parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',\n help=default('The maximum number of ghosts to use'), default=4)\n parser.add_option('-z', '--zoom', type='float', dest='zoom',\n help=default('Zoom the size of the graphics window'), default=1.0)\n parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',\n help='Fixes the random seed to always play the same game', default=False)\n parser.add_option('-r', '--recordActions', action='store_true', dest='record',\n help='Writes game histories to a file (named by the time they were played)', default=False)\n parser.add_option('--replay', dest='gameToReplay',\n help='A recorded game file (pickle) to replay', default=None)\n parser.add_option('-a','--agentArgs',dest='agentArgs',\n help='Comma separated values sent to agent. e.g. 
\"opt1=val1,opt2,opt3=val3\"')\n parser.add_option('-x', '--numTraining', dest='numTraining', type='int',\n help=default('How many episodes are training (suppresses output)'), default=0)\n parser.add_option('--frameTime', dest='frameTime', type='float',\n help=default('Time to delay between frames; <0 means keyboard'), default=0.1)\n parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',\n help='Turns on exception handling and timeouts during games', default=False)\n parser.add_option('--timeout', dest='timeout', type='int',\n help=default('Maximum length of time an agent can spend computing in a single game'), default=30)\n\n #ONCE ALL THE OPTIONS HAVE BEEN DEFINED, optparse is instructed to parse the programm's command line.\n ##> The parser.parse_args() returns two values:\n ### (A) OPTIONS: An object containing values for all of your options e.g.:e.g. if --file takes a single string argument, then options.file will be the filename supplied by the user, or None if the user did not supply that option\n ### (B) ARGS: The list of positional arguments leftover after parsing options (we call this here otherjunk)\n options, otherjunk = parser.parse_args(argv) ## if the user happens to accidentally enter a command other than the specified arguments specified by parser.add_option it is passed to otherjunk\n if len(otherjunk) != 0: ## if there actually ends up to be a value in the otherjunk the program raises an Exception.\n raise Exception('Command line input not understood: ' + str(otherjunk))\n args = dict() # ARGS IS THE VARIABLE THAT IS BEING RETURNED BY THE readCommand function.\n\n # Fix the random seed\n if options.fixRandomSeed: random.seed('cs188') # 'random.seed' is part of the random class. The random.seed([x]) command initialises a standard random number. Optional argument x can be any hashable object. \n\n # Choose a layout\n args['layout'] = layout.getLayout( options.layout ) # REF_LAYOUT111: layout.py --> This function returns the layout object that was created by the layout class via the getlayout function. This contains the height, width, walls, food, captules and agent positions etc.\n if args['layout'] == None: raise Exception(\"The layout \" + options.layout + \" cannot be found\")\n\n # Choose a Pacman agent\n noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics) ## noKeyboard is set to TRUE if the user chooses the --replay and text- or silent graphics option.\n ##print noKeyboard\n pacmanType = loadAgent(options.pacman, noKeyboard) ## [see REFERENCE_001]: the loadAgent function takes the pacman argument the user passed into the command line as the option--pacman option identifies the appropriate agent (which may be the programmed agent or whost agent). \n agentOpts = parseAgentArgs(options.agentArgs) ##Passes the option.agentArgs which was captured by the user's console input into the agentOps variable. agentArgs is: \"Comma separated values sent to agent. e.g. \"opt1=val1,opt2,opt3=val3. The ParseAgentArgs function converts the option - value pairings into a dictionary formatted opts[opt1] = val1. 
\n if options.numTraining > 0: ##numTraining was captured by the user's console input and designates how many games are training games which means that the output remains surpressed.\n args['numTraining'] = options.numTraining ## This takes the user's input as the -x or --numTraining and passes it to the args dictionary with the numTraining key as the args['numTraining'] variable.\n if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining ## This integrates the variable entered into as training rounds in the agentOpts variable.\n pacman = pacmanType(**agentOpts) ## REFERENCE002 ##Instantiate Pacman with agentOpts. ## The variable pacmanType contains a reference to agent module loaded by the load Agent function. This function does not cause the module to be instanciated. This happens when here ## See[REFERENCE_001]: ## The * and ** will 'soak up' any remaining values not otherwise accounted for. In this case these options are basically the agent options the user can input.\n ## agentOpts contains the opts dictionary = {opt1:val1, opt2:val2, opt3:val3}; it also contains the numTraining variable as the ['numTraining'] key. As such it has the following structure. {opt1:val1,opt2:val2,opt3:val3, numTraining:int}.\n args['pacman'] = pacman ## This passes the instanciated object to the agent dictionary containing the pacman key.\n\n # Don't display training games\n if 'numTrain' in agentOpts: ## Checks whether the user has determined a certain number of training games. If they did, the number is passed on as an int to the options.numQuiet and option.numIgnore variables.\n options.numQuiet = int(agentOpts['numTrain']) \n options.numIgnore = int(agentOpts['numTrain'])\n\n # Choose a ghost agent\n ghostType = loadAgent(options.ghost, noKeyboard) ## The options.ghost variable contains the user's ghost type preference as specified in the console.The user can choose between -g RandomGhost which is A ghost that chooses a legal action uniformly at random OR DirectionalGhost, a ghost that prefers to rush Pacman, or flee when scared.\n args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )] #instanciates as many ghost agents as the player requested by entering the desired number as -k', '--numghosts'in the console.\n\n # Choose a display format ##contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n if options.quietGraphics: \n import textDisplay\n args['display'] = textDisplay.NullGraphics()\n elif options.textGraphics:\n import textDisplay\n textDisplay.SLEEP_TIME = options.frameTime\n args['display'] = textDisplay.PacmanGraphics()\n else:\n import graphicsDisplay ## This refers to the module that is responsible for the graphical representation of the game.\n args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime) ## This line instanciates the PacmanGraphics class from the graphicsDisplay module and passes the reference to the args['display'] dictionary.\n args['numGames'] = options.numGames \n args['record'] = options.record\n args['catchExceptions'] = options.catchExceptions\n args['timeout'] = options.timeout\n\n # Special case: recorded games don't use the runGames method or args structure\n if options.gameToReplay != None:\n print 'Replaying recorded game %s.' 
% options.gameToReplay \n import cPickle\n f = open(options.gameToReplay)\n try: recorded = cPickle.load(f)\n finally: f.close()\n recorded['display'] = args['display']\n replayGame(**recorded)\n sys.exit(0)\n\n return args #returns the args-dictionary which contains:\n ##args['pacman'] which contains a dictionary of dictionaries of the agent that was loaded into args['numtraining'] = {agentOpts[opt1]: val1 ; agentOpts[opt2]:val2; agentOpts[opt3]:val3}\n ##args['layout'] - this function returns the layout object that was created by the layout class via the getlayout function.\n ##args['numTraining'] which contains which designates how many games are training games which means that the output remains surpressed\n ##args['ghosts'] - contains the instanciated ghost agents in line with the number the user specified\n ##args['display'] - contains whether the game output is displayed as minimal output and no graphics (-q) text only (-t) or via graphicsDiplay (standard)\n ##args['numGames'] - the number of GAMES to play\n ##args['record'] - Writes game histories to a file (named by the time they were played)\n ##args['catchExceptions'] = options.catchExceptions - Turns on exception handling and timeouts during games\n ##args['timeout'] = options.timeout -Maximum length of time an agent can spend computing in a single game",
"def __init__(self):\n self.parser = argparse.ArgumentParser(prog='PROG')\n self.parser.add_argument(\"--idir\", action=\"store\",\n dest=\"idir\", default=\"\", help=\"Input data path\")\n self.parser.add_argument(\"--dates\", action=\"store\",\n dest=\"dates\", default=\"\", help=\"dates or dates-rante to read, e.g. YYYYMMDD-YYYYMMDD\")",
"def read_cmd(self):\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n req_opts = parser.add_argument_group(\"Required Options\")\n req_opts.add_argument(\"--instance_dir\", required=True,\n help=\"directory with instances (not recursive\")\n \n opt_opts = parser.add_argument_group(\"Optional Options\")\n \n opt_opts.add_argument(\"--fn_suffix\", default=\".*\",\n help=\"suffix of instance file names\")\n opt_opts.add_argument(\"--cutoff\", default=10, type=int,\n help=\"running time cutoff [sec]\")\n opt_opts.add_argument(\"--memlimit\", default=2048, type=int,\n help=\"memory limit\")\n opt_opts.add_argument(\"--ac_budget\", default=360,\n help=\"configuration budget [sec]\")\n opt_opts.add_argument(\"--run_obj\", default=\"runtime\",\n choices=[\"runtime\", \"quality\"],\n help=\"run objective\")\n opt_opts.add_argument(\"--par-factor\", default=10,\n help=\"Factor by which to penalize unsolved instances. Usage may differ based on TAE used.\")\n\n opt_opts.add_argument(\"--binary\", default=\"clingo\",\n help=\"target binary\")\n opt_opts.add_argument(\"--pcs_file\", default=\"pcs/all_params.pcs\",\n help=\"parameter configuration file\")\n opt_opts.add_argument(\"--runsolver\", default=\"binaries/runsolver\",\n help=\"runsolver binary\")\n opt_opts.add_argument(\"--tae_class\", default=None,\n help=\"TAE class to individualize clingo calls -- has to inherit from smac.tae.execute_ta_run_aclib.ExecuteTARunAClib\")\n\n\n opt_opts.add_argument(\"--seed\", default=12345, type=int,\n help=\"random seed\")\n opt_opts.add_argument(\"--verbose_level\", default=logging.INFO,\n choices=[\"INFO\", \"DEBUG\"],\n help=\"random seed\")\n opt_opts.add_argument(\"--tae_args\", default=\"{}\",\n help=\"Miscellaneous options for the TAE\")\n \n\n args_, misc = parser.parse_known_args()\n self._check_args(args_)\n args_.tae_args=json.loads(args_.tae_args)\n\n # remove leading '-' in option names\n misc = dict((k.lstrip(\"-\"), v.strip(\"'\"))\n for k, v in zip(misc[::2], misc[1::2]))\n\n misc[\"instances\"] = self._find_files(dir_=args_.instance_dir, suffix_=args_.fn_suffix)\n misc[\"wallclock_limit\"] = args_.ac_budget\n misc[\"cutoff_time\"] = args_.cutoff\n misc[\"paramfile\"] = args_.pcs_file\n misc[\"algo\"] = \"\"\n misc[\"run_obj\"] = args_.run_obj\n\n return args_, misc",
"def parse_commandline():\n parser = optparse.OptionParser()\n\n parser.add_option(\"-n\", \"--num\", help=\"Pulsar number\", default=0, type=int)\n parser.add_option(\"-p\", \"--prfile\", help=\"Parameter file\", type=str)\n parser.add_option(\"-d\", \"--drop\", \\\n help=\"Drop pulsar with index --num in a full-PTA run \\\n (0 - No / 1 - Yes)\", default=0, type=int)\n parser.add_option(\"-c\", \"--clearcache\", \\\n help=\"Clear psrs cache file, associated with the run \\\n (to-do after changes to .par and .tim files)\", \\\n default=0, type=int)\n parser.add_option(\"-m\", \"--mpi_regime\", \\\n help=\"In MPI, manipulating with files and directories \\\n causes errors. So, we provide 3 regimes: \\n \\\n (0) No MPI - run code as usual; \\n \\\n (1) MPI preparation - manipulate files and prepare \\\n for the run (should be done outside MPI); \\n \\\n (2) MPI run - run the code, assuming all necessary \\\n file manipulations have been performed. \\n \\\n PolychordLite sampler in Bilby supports MPI\",\n default=0, type=int)\n parser.add_option(\"-w\", \"--wipe_old_output\", \\\n help=\"Wipe contents of the output directory. Otherwise, \\\n the code will attempt to resume the previous run. \\\n Be careful: all subdirectories are removed too!\", \\\n default=0, type=int)\n parser.add_option(\"-x\", \"--extra_model_terms\", \\\n help=\"Extra noise terms to add to the .json noise model \\\n file, a string that will be converted to dict. \\\n E.g. {'J0437-4715': {'system_noise': \\\n 'CPSR2_20CM'}}. Extra terms are applied either on \\\n the only model, or the second model.\", \\\n default='None', type=str)\n\n opts, args = parser.parse_args()\n\n return opts",
"def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser",
"def processArgs(self, argv):\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--show_ADT\", action=\"store_true\", dest=\"show_ADT\",\n default=self.show_ADT, help=\"Display ADT value if set\")\n parser.add_option(\"-f\", \"--show_file\", action=\"store_true\", dest=\"show_file\",\n default=self.show_file, help=\"Display matching filename if set\")\n parser.add_option(\"-t\", \"--show_time\", action=\"store_true\", dest=\"show_time\",\n default=self.show_time, help=\"Display message time\")\n parser.add_option(\"-v\", \"--show_visitID\", action=\"store_true\", dest=\"show_visitID\",\n default=self.show_visitID, help=\"Display visit ID\")\n parser.add_option(\"-p\", \"--show_pc\",\n action=\"store_true\",\n dest=\"show_pc\",\n default=self.show_pc,\n help=\"Display patient class\")\n\n (options, pargs) = parser.parse_args()\n if len(pargs) < 3:\n parser.error(\"incorrect number of arguments\")\n\n self.show_ADT = parser.values.show_ADT\n self.show_file = parser.values.show_file\n self.show_time = parser.values.show_time\n self.show_visitID = parser.values.show_visitID\n self.show_pc = parser.values.show_pc\n \n self.segments_of_interest = pargs.pop(0)\n if len(self.segments_of_interest) != 3:\n parser.error(\"segment '%s' looks incorrect, expected something like 'PV1'\"\n % self.segments_of_interest)\n\n try:\n nums = pargs.pop(0).split(\",\")\n for num in nums:\n if 'MSH' == self.segments_of_interest:\n num = int(num) - 1\n self.sequences.append(int(num))\n except:\n parser.error(\"sequence must be an integer, separate multiple w/ comma and no spaces\")\n\n for patternOrFile in pargs:\n for file in glob.glob(patternOrFile):\n if not os.path.isfile(file):\n parser.error(\"can't open input file %s\" % file)\n self.filelist.append(file)\n \n # Require at least one file\n if not len(self.filelist):\n parser.error(\"at least one input file is required\")",
"def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager",
"def ParseArguments():\n\t#TODO: check input variable types!\n\t# check for integers ans strings\n\t# check for distance and distance cutoff value: ONLY CERTAIN VALUES ALLOWED\n\targ_parser = argparse.ArgumentParser(description=\"Program to get background distribution matching user input SNPs on the following parameters {MAF, distance to nearest gene, gene density}\")\n\tsubparsers = arg_parser.add_subparsers(dest='subcommand',\n\t\t\t\t\t\t\t\t\t title='subcommands in this script',\n\t\t\t\t\t\t\t\t\t description='valid subcommands. set subcommand after main program required arguments',\n\t\t\t\t\t\t\t\t\t help='You can get additional help by writing <program-name> <subcommand> --help')\n\n\t## Subparsers\n\targ_parser_annotate = subparsers.add_parser('annotate')\n\t#arg_parser_annotate.set_defaults(func=run_annotate)\n\targ_parser_match = subparsers.add_parser('match')\n\t#arg_parser_annotate.set_defaults(func=run_match)\n\n\n\targ_parser.add_argument(\"--user_snps_file\", help=\"Path to file with user-defined SNPs\", required=True) # TODO: make the program read from STDIN via '-'\n\targ_parser.add_argument(\"--output_dir\", help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\t#arg_parser.add_argument(\"--output_dir\", type=ArgparseAdditionalUtils.check_if_writable, help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\targ_parser.add_argument(\"--distance_type\", help=\"ld or kb\", required=True)\n\targ_parser.add_argument(\"--distance_cutoff\", help=\"r2, or kb distance\", required=True)\n\t# NEW: options\n\t#arg_parser.add_argument(\"--status_file\", help=\"Bool (switch, takes no value after argument); if set then logging is ENABLED.\", action='store_true')\n\t#arg_parser.add_argument(\"--status_file\", help=\"If set, a json file will be written. Value should be the a filepath.\")\n\targ_parser.add_argument(\"--web\", help=\"If set, the program will run in web mode. VALUE should be the a filepath to output (temporary) file - usually this will be the session_id. The web mode activates: 1) creating a status_obj and writing it to json file; 2) ENABLE writing a json report file;\")\n\targ_parser.add_argument(\"--NoLogger\", help=\"Bool (switch, takes no value after argument); if set then logging is DISAPLED. Logfile will be placed in outputdir.\", action='store_true')\n\n\n\t### MATCH arguments\n\targ_parser_match.add_argument(\"--N_sample_sets\", type=int, help=\"Number of matched SNPs to retrieve\", required=True) # 1000 - \"Permutations?\" TODO: change name to --n_random_snp_sets or --N\n\t#TODO: add argument that describes if ABSOLUTE of PERCENTAGE deviation should be used\n\targ_parser_match.add_argument(\"--max_freq_deviation\", type=int,help=\"Maximal deviation of SNP MAF bin [MAF +/- deviation]\", default=5) # 5\n\targ_parser_match.add_argument(\"--max_distance_deviation\", type=int, help=\"Maximal PERCENTAGE POINT deviation of distance to nearest gene [distance +/- %%deviation])\", default=5) # 20000\n\t#TODO: CHECK THAT max_distance_deviation > 1 %\n\t#TODO: WHY IS max_genes_count_deviation type float!!!!????\n\targ_parser_match.add_argument(\"--max_genes_count_deviation\", type=float, help=\"Maximal PERCENTAGE POINT deviation of genes in locus [gene_density +/- %%deviation]\", default=5) # 0.2\n\targ_parser_match.add_argument(\"--set_file\", help=\"Bool (switch, takes no value after argument); if set then write out set files to rand_set..gz. 
Default is false\", action='store_true')\n\n\targs = arg_parser.parse_args()\n\n\treturn args",
"def get_parser():\n\tparser = argparse.ArgumentParser('preprocessing.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nRun a piepline for one NICER ObsID data. \n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('obsid', type=str, \n\t\thelp='ObsID (e.g., 4012010109)')\t\n\treturn parser",
"def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n parser.add_argument('--prepare', action='store_true',\n help='create the directories, prepare the vocabulary and embeddings')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--generate', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gentest', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu', type=str, default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='Adam',\n help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--dropout', type=float, default=0,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size', type=int, default=128,\n help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=10,\n help='train epochs')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--embed_size', type=int, default=128,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size', type=int, default=256,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_seq_len', type=int, default=50,\n help='max passage num in one sample')\n model_settings.add_argument('--max_gen_len', type=int, default=50,\n help='max length of passage')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--vocab_dir', default='../data/vocab/',\n help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir', default='../data/models/',\n help='the dir to store models')\n path_settings.add_argument('--result_dir', default='../data/results/',\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir', default='../data/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n return parser.parse_args()",
"def arg_parse():\n parser = argparse.ArgumentParser(description=\"Fetches the current ATT&CK content expressed as STIX2 and creates spreadsheet mapping Techniques with Mitigations, Groups or Software.\")\n parser.add_argument(\"-d\", \"--domain\", type=str, required=True, choices=[\"enterprise_attack\", \"mobile_attack\"], help=\"Which ATT&CK domain to use (Enterprise, Mobile).\")\n parser.add_argument(\"-m\", \"--mapping-type\", type=str, required=True, choices=[\"groups\", \"mitigations\", \"software\"], help=\"Which type of object to output mappings for using ATT&CK content.\")\n parser.add_argument(\"-t\", \"--tactic\", type=str, required=False, help=\" Filter based on this tactic name (e.g. initial-access) \" )\n parser.add_argument(\"-s\", \"--save\", type=str, required=False, help=\"Save the CSV file with a different filename.\")\n return parser",
"def read_arguments():\n\n parser = argparse.ArgumentParser(\n description='Enter arguments to run the pipeline.')\n\n # arguments for external files that might be necessary to run the program\n parser.add_argument(\n '--cost_network', type=str,\n help='file storing the state dictionary of the cost network.'\n )\n\n parser.add_argument(\n '--policy_network', type=str,\n help='File storing the state dictionary of the Policy network.'\n )\n\n parser.add_argument(\n '--state_dictionary', type=str,\n help='Environment on which to run the algo (obstacle/no obstacle)'\n )\n\n parser.add_argument(\n '--expert_trajectory_file', type=str,\n help='Path to file containing the exeprt trajectories.')\n\n # network hyper parameters\n parser.add_argument(\n '--cost_network_input', type=int, default=29,\n help='layer size of cost network. None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden size of cost network.None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_output', type=int, default=1,\n help='Output layer size of cost network.None if you have specified \\\n cost network state dict.')\n\n parser.add_argument(\n '--policy_network_input', type=int, default=29,\n help='Input layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_output', type=int, default=4,\n help='Output layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n # other run hyper parameters like optimizer and all???\n\n # run hyperparameters\n parser.add_argument('--irl_iterations', type=int,\n help='Number of times to iterate over the IRL part.')\n\n parser.add_argument(\n '--no_of_samples', type=int,\n help='Number of samples to create agent state visitation frequency.')\n\n parser.add_argument(\n '--rl_iterations', type=int,\n help='Number of iterations to be performed in the RL section.')\n\n # arguments for the I/O of the program\n parser.add_argument(\n '--display_board', type=str, default='False',\n help='If True, draw envirnment.')\n\n parser.add_argument(\n '--on_server', type=str, default='True',\n help='False if program is to run on server.')\n\n parser.add_argument('--store_results', type=str, default='True')\n\n parser.add_argument(\n '--plot_interval', type=int, default=10,\n help='Iterations before loss and reward curve plots are stored.')\n\n parser.add_argument(\n '--savedict_policy_interval', type=int, default=100,\n help='Iterations after which the policy network will be stored.')\n\n parser.add_argument(\n '--savedict_cost_interval', type=int, default=1,\n help='Iterations after which the cost network will be stored.')\n\n # arguments for the broader pipeLine\n parser.add_argument(\n '--rl_method', type=str,\n help='Enter the RL method to be used.')\n\n parser.add_argument(\n '--feature_space', type=str,\n help='Type of features to be used to get the state of the agent.')\n\n parser.add_argument('--irl_method', type=str,\n help='Enter the IRL method to be used.')\n\n parser.add_argument(\n '--run_type', type=str, default='train',\n help='Enter if it is a train run or a test run.(train/test).')\n\n parser.add_argument(\n '--verbose', type=str, 
default='False',\n help='Set verbose to \"True\" to get a myriad of print statements crowd\\\n your terminal. Necessary information should be provided with either\\\n of the modes.')\n\n parser.add_argument(\n '--no_of_testRuns', type=int, default=0,\n help='If --run_type set to test, then this denotes the number of test \\\n runs you want to conduct.')\n\n _args = parser.parse_args()\n\n return _args",
"def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()",
"def argument_parser():\n parser = argparse.ArgumentParser(\n description=\"\"\"Creates a PC file with a selection of points from a BBox and level\"\"\")\n parser.add_argument('-s','--srid',default='',help='SRID',type=int, required=True)\n parser.add_argument('-e','--mail', default='',help='E-mail address to send e-mail after completion',type=str, required=True)\n parser.add_argument('-b','--bbox', default='', help='Bounding box for the points selection given as \"minX minY maxX maxY\"',required=True,type=str)\n parser.add_argument('-l','--level', default='',help='Level of data used for the generation (only used if the used table is the one with the potree data). If not provided the raw data is used',type=str)\n parser.add_argument('-d','--dbname',default=utils.DB_NAME,help='Postgres DB name [default ' + utils.DB_NAME + ']',type=str)\n parser.add_argument('-u','--dbuser', default=USERNAME,help='DB user [default ' + USERNAME + ']',type=str)\n parser.add_argument('-p','--dbpass', default='',help='DB pass',type=str)\n parser.add_argument('-t','--dbhost', default='',help='DB host',type=str)\n parser.add_argument('-r','--dbport', default='',help='DB port',type=str)\n parser.add_argument('-w','--baseurl', default='',help='Base URL for the output file (web access)',type=str)\n parser.add_argument('-f','--basepath', default='',help='Base path for the output file (internal access)',type=str)\n return parser",
"def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()",
"def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()",
"def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()",
"def parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-f\", \"--folder\", required=True, action=\"store\", dest=\"folder\", help=\"input folder\")\n parser.add_argument(\"-t\", \"--tool\", required=True, action=\"store\", dest=\"tool\", choices=[\"spacy\",\"stanford\"], help=\"NER tool\")\n parser.add_argument(\"-l\", \"--lang\", required=True, action=\"store\", dest=\"lang\", help=\"language\")\n\n return parser.parse_args()",
"def prepare_arg_parser():\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"\"\"Ajout des ponctuations réelles dans un xml de \n\t\t refbibs (NB lent: ~ 2 doc/s sur 1 thread)\"\"\",\n\t\tusage=\"\"\"ragreage.py \n\t\t -x ech/tei.xml/oup_Human_Molecular_Genetics_ddp278.xml\n\t\t -p ech/pdf/oup_Human_Molecular_Genetics_ddp278.pdf\n\t\t -m [bibzone|biblines|bibfields|authornames]\"\"\",\n\t\tepilog=\"- © 2014-15 Inist-CNRS (ISTEX) romain.loth at inist.fr -\"\n\t\t)\n\t\n\t\n\tparser.add_argument('-x','--xmlin',\n\t\tmetavar='path/to/xmlfile',\n\t\thelp=\"\"\"\n\t\tpath to a TEI.xml with citations in <biblStruct> xml format \n\t\t(perhaps to be created from native XML by a call like \n\t\t`saxonb-xslt -xsl:tools/Pub2TEI/Stylesheets/Publishers.xsl\n\t\t-s:exemples_RONI_1513/rsc_1992_C3_C39920001646.xml`)'\"\"\",\n\t\ttype=str,\n\t\trequired=True,\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-p','--pdfin',\n\t\tmetavar='path/to/pdffile',\n\t\thelp=\"\"\"path to a pdf file of the same text, for attempted\n\t\t pdftottext and citation regexp match\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\n\tparser.add_argument('-t','--txtin',\n\t\tmetavar='path/to/txtfile',\n\t\thelp=\"\"\"pdfin can be replaced by a path to a txt flow.\n\t\tThis input text must be very close to the xml content\n\t\t(or segment thereof, in accordance with a chosen -m type)\"\"\",\n\t\ttype=str,\n\t\tdefault=None , # cf juste en dessous\n\t\taction='store')\n\t\t\n\t\n\t\n\tparser.add_argument('-m','--model-type',\n\t\tmetavar='name-of-model',\n\t\thelp=\"\"\"format output as a valid tei's 'listBibl' (default)\n\t\t or tailored to a Grobid crf model pseudotei input among:\n\t\t {'bibzone', 'biblines', 'bibfields', 'authornames'}\"\"\",\n\t\ttype=str,\n\t\tdefault='listBibl' ,\n\t\taction='store')\n\t\n\t\n\tparser.add_argument('-d','--debug',\n\t\tmetavar=1,\n\t\ttype=int,\n\t\thelp='logging level for debug info in [0-3]',\n\t\tdefault=0,\n\t\taction='store')\n\t\n\t\t\n\tparser.add_argument('-r', '--remainder',\n\t\tdest='mask',\n\t\thelp='show mask after matches instead of normal output',\n\t\taction='store_true')\n\t\n\treturn parser",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)",
"def main():\n\n parser = argparse.ArgumentParser(description=\"generateTestStubs\")\n\n parser.add_argument(\"taskFile\",\n help=\"Path for assignment file.\")\n\n args = parser.parse_args()\n\n if not os.path.exists(args.taskFile):\n print(\"Task file does not exist.\")\n sys.exit(1)\n\n taskMgr = EEWebLPProject()\n taskMgr.initLP()\n\n #taskMgr.listProjects()\n #taskMgr.loadTree([\"project_id=8008922\"])\n tasks = taskMgr.getTasks([\"project_id=6890048\"],parent_id=8008922)\n\n fileByAssignee = taskMgr.getTaskOwners(args.taskFile)\n taskMgr.updateTaskOwners(fileByAssignee,tasks)",
"def process_command_line(argv):\n\tif argv is None:\n\t\targv = sys.argv[1:]\n\t\t\n\t# initialize the parser object:\n\tparser = optparse.OptionParser(\n\t\t\t formatter=optparse.TitledHelpFormatter(width=78),\n\t\t\t add_help_option=None)\n\n\t# define options here:\n\n\tparser.add_option(\n\t\t'-w', '--workdir',\n\t\thelp='Workdir where temporary and final files will be saved.')\n\n\tparser.add_option(\n\t\t'-a', '--assembly_file',\n\t\thelp='File with a list of assemblies for which a reference genome is to be determined.')\n\n\tparser.add_option(\n \t'--installation',\n \thelp='Pipeline installation.')\n\n parser.add_option(\n '--EDIRECT',\n help='edirect tools installation.')\n\n parser.add_option(\n '--QUAST',\n help='Quast installation.')\n\n parser.add_option(\n '--mail',\n help='Email for edirect.')\n\n\tparser.add_option(\n\t\t'-o', '--organism',\n\t\thelp='Organism to be searched for on NCBI Assembly.')\n\n\tparser.add_option(\n\t\t'--dont_delete', action=\"store_true\",\n\t\thelp='Do not delete temporary files after running.')\n\t\t\n\tparser.add_option(\n\t\t'-s', '--script', default=\"/home/users/yair/Documents/PhD_projects/project_B/bin/downloading_database/determine_best_genome.sh\",\n\t\thelp='Path of determine_best_genome.sh script')\n\n\tparser.add_option( # customized description; put --help last\n\t\t'-h', '--help', action='help',\n\t\thelp='Show this help message and exit.')\n\n\tsettings, args = parser.parse_args(argv)\n\n\treturn settings, args",
"def parse_args():\n parser = argparse.ArgumentParser(\n description=\"A simple tool to train a character level classifier and predict a class based on an input \"\n \"string. Useful for tasks like detecting country by address line, nationality by name, \"\n \"etc. Prediction is printed into stdout.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n required_group = parser.add_argument_group(\"Required\")\n optional_group = parser.add_argument_group(\"Optional\")\n\n required_group.add_argument(\n \"action\", help=\"Action: train or predict?\", choices=[\"train\", \"predict\"]\n )\n required_group.add_argument(\n \"-f\",\n \"--input-file\",\n help=\"\"\"A file with an input line and a prediction category, separated by SEP (--separator) symbol. If '-' then stdin is used\"\"\",\n required=True,\n )\n required_group.add_argument(\n \"-m\",\n \"--model-file\",\n help=\"Model file. Overwriten after training or loaded for prediction. Tokenizer data will be stored along as 2 <modelname>_tokenizer_x/y.pickle files\",\n required=True,\n )\n optional_group.add_argument(\n \"--extra-lstm-layer\",\n help=\"Add a second LSTM layer for finding extra structure in some cases\",\n action=\"store_true\",\n )\n optional_group.add_argument(\n \"--min-printable-score\",\n help=\"\"\"Don't print the class and its score if the probability is lower than this limit\"\"\",\n type=float,\n default=0.001,\n )\n optional_group.add_argument(\n \"--num-classes\",\n help=\"Print number of classes for each input line\",\n type=int,\n default=1,\n )\n optional_group.add_argument(\n \"--num-epochs\", help=\"Number of training epochs\", type=int, default=NUM_EPOCHS\n )\n optional_group.add_argument(\n \"--print-scores\",\n help=r\"Print scores for each class in such form: CLASS1=95.3%%,CLASS2=3.2%%,etc...\",\n action=\"store_true\",\n )\n optional_group.add_argument(\n \"--separator\",\n help=\"Output separator symbol: orig_lin<SEP><class1=0.98>,<class2=0.01>,...\",\n default=\"|\",\n )\n optional_group.add_argument(\n \"--weights-file\",\n help=\"Weight from previous run if you want to continue training\",\n type=str,\n default=None,\n )\n optional_group.add_argument(\n \"--word-level\",\n help=\"Replace char-level tokenizer for input string with word-level one\",\n action=\"store_true\",\n )\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n return parser.parse_args()",
"def arg_parse():\n script_folder = sys.path[0]\n code_folder = os.path.dirname(script_folder)\n project_folder = os.path.dirname(code_folder)\n cimis_folder = os.path.join(project_folder, 'cimis')\n img_folder = os.path.join(cimis_folder, 'input_img')\n ancillary_folder = os.path.join(cimis_folder, 'ancillary')\n\n parser = argparse.ArgumentParser(\n description='CIMIS daily ETr',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--start', required=True, type=_utils.valid_date, metavar='YYYY-MM-DD',\n help='Start date')\n parser.add_argument(\n '--end', required=True, type=_utils.valid_date, metavar='YYYY-MM-DD',\n help='End date')\n parser.add_argument(\n '--img', default=img_folder, metavar='PATH',\n help='Input IMG raster folder path')\n parser.add_argument(\n '--ancillary', default=ancillary_folder, metavar='PATH',\n help='Ancillary raster folder path')\n parser.add_argument(\n '--output', default=cimis_folder, metavar='PATH',\n help='Output raster folder path')\n parser.add_argument(\n '--etr', default=False, action=\"store_true\",\n help='Compute alfalfa reference ET (ETr)')\n parser.add_argument(\n '--eto', default=False, action=\"store_true\",\n help='Compute grass reference ET (ETo)')\n parser.add_argument(\n '--extent', default=None, metavar='PATH',\n help='Subset extent path')\n parser.add_argument(\n '-te', default=None, type=float, nargs=4,\n metavar=('xmin', 'ymin', 'xmax', 'ymax'),\n help='Subset extent in decimal degrees')\n parser.add_argument(\n '--stats', default=False, action=\"store_true\",\n help='Compute raster statistics')\n parser.add_argument(\n '--use_cimis_eto', default=False, action=\"store_true\",\n help='Use CIMIS ETo if ETr/ETo cannot be computed')\n parser.add_argument(\n '-o', '--overwrite', default=False, action=\"store_true\",\n help='Force overwrite of existing files')\n parser.add_argument(\n '-d', '--debug', default=logging.INFO, const=logging.DEBUG,\n help='Debug level logging', action=\"store_const\", dest=\"loglevel\")\n args = parser.parse_args()\n\n # Convert relative paths to absolute paths\n if args.img and os.path.isdir(os.path.abspath(args.img)):\n args.img = os.path.abspath(args.img)\n if args.ancillary and os.path.isdir(os.path.abspath(args.ancillary)):\n args.ancillary = os.path.abspath(args.ancillary)\n\n return args",
"def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument('infile',\n help='path to file containing objects')\n p.add_argument('n1',\n help='night 1')\n p.add_argument('n2',\n help='night 2')\n p.add_argument('observatory',\n help='Astropy name of observatory')\n return p.parse_args()",
"def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()"
] |
[
"0.62851864",
"0.6227594",
"0.6205946",
"0.62008595",
"0.6168491",
"0.6139022",
"0.613443",
"0.61032486",
"0.6086535",
"0.6081039",
"0.6063985",
"0.60548097",
"0.6054122",
"0.6045408",
"0.6036239",
"0.6033285",
"0.6021215",
"0.6017663",
"0.601363",
"0.60068935",
"0.6006618",
"0.60042393",
"0.5997991",
"0.59936684",
"0.5993349",
"0.59806025",
"0.5978937",
"0.59730935",
"0.5972941",
"0.59678286"
] |
0.6481555
|
0
|
Checks self._polling_execute_frequency to figure out which OID to use for cpu_utilization
|
def _get_cpu_interval(self):
self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])
if 5 <= self._polling_execute_frequency < 60:
return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev
elif 60 <= self._polling_execute_frequency < 300:
return cpmCPUTotal1minRev
elif 300 <= self._polling_execute_frequency:
return cpmCPUTotal5minRev
else:
return cpmCPUTotal1minRev
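# Illustrative sketch (not part of the original plugin): the same threshold logic as a
# pure function, which makes the interval-to-OID selection easy to unit test. The 'oids'
# dict is an assumption made only for this example; its values would be the
# CISCO-PROCESS-MIB objects referenced above.
def _select_cpu_oid(polling_frequency, oids):
    # oids example: {'interval': cpmCPUTotalMonIntervalValue,
    #                '1min': cpmCPUTotal1minRev, '5min': cpmCPUTotal5minRev}
    if 5 <= polling_frequency < 60:
        return oids['interval']
    if 60 <= polling_frequency < 300:
        return oids['1min']
    if polling_frequency >= 300:
        return oids['5min']
    return oids['1min']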
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_cpu(self, values):\n try:\n cpu_percent = psutil.cpu_percent()\n values[keys.KEY_CPU_PERCENT] = cpu_percent\n cpu_times = psutil.cpu_times()\n values[keys.KEY_CPU_USER_TIMES] = cpu_times.user\n except:\n logging.error(\"Error collecting CPU stats.\")\n\n try:\n cpu_temp = cpu_status.cpu_temperature()\n if cpu_temp > 0:\n values['cpu - temperature'] = cpu_temp\n except:\n pass",
"def get_cpu(self):\n pass",
"def getcpuusage(self):\n return ord(self.reg(0x11, write=1))",
"def get_cpu_usage(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetCpuUsage', self.handle)",
"def cpu_time(self):",
"def cpu(self) -> int:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self):\r\n return self._cpu",
"def _cpu_usage(self, e):\n\n cores = os.cpu_count()\n try:\n cpu_usage = int(self.cpu_entry.get())\n if cpu_usage < 0 or cpu_usage > 100:\n self.invalid_input()\n elif cpu_usage == 0:\n self.processes = 1\n else:\n self.processes = round(cpu_usage / 100 * cores)\n except ValueError:\n self.invalid_input()",
"def get_overall_cpu_util(dut, exclude_proc_name=None):",
"def test_cpu(self):\n cpu = CPUCyclesResource(128 * (2**20))\n self.assertEqual(cpu.get_value(), 128 * (2**20))\n cpu = CPUCyclesResource(128 * (2**19))\n self.assertEqual(cpu.get_value(), 128 * (2**19))\n cpu = CPUCyclesResource(128 * (2**21))\n self.assertEqual(cpu.get_value(), 128 * (2**21))",
"def cpu_usage(self):\n dsp = c_float()\n stream = c_float()\n geometry = c_float()\n update = c_float()\n total = c_float()\n ckresult(\n _dll.FMOD_System_GetCPUUsage(\n self._ptr,\n byref(dsp),\n byref(stream),\n byref(geometry),\n byref(update),\n byref(total),\n )\n )\n return so(\n dsp=dsp.value,\n stream=stream.value,\n geometry=geometry.value,\n update=update.value,\n total=total.value,\n )",
"def checkCpu(self):\n cpu = self.getCpu()\n err_msg = []\n task_result = device_status = 0\n\n if cpu is None:\n err_msg.append('Get CPU info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return cpu, err_msg, task_result, device_status",
"def get_cpu_usage(conn):\n prev_idle = 0\n prev_total = 0\n cpu = conn.getCPUStats(-1, 0)\n if type(cpu) == dict:\n for num in range(2):\n idle = list(conn.getCPUStats(-1, 0).values())[1]\n total = sum(list(conn.getCPUStats(-1, 0).values()))\n diff_idle = idle - prev_idle\n diff_total = total - prev_total\n diff_usage = (1000 * (diff_total - diff_idle) / diff_total + 5) / 10\n prev_total = total\n prev_idle = idle\n if num == 0:\n time.sleep(1)\n else:\n if diff_usage < 0:\n diff_usage = 0\n else:\n return {'usage': None}\n return {'usage': diff_usage}",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"cpu_per_unit\")",
"def cpu():\n sin = psutil.cpu_percent()\n return round(sin / 100, 3)",
"def _fetchPerf(self):\n self.state = ZenProcessTask.STATE_FETCH_PERF\n\n oids = []\n for pid in self._deviceStats.pids:\n if not AS400PLUG in self._device.zCollectorPlugins:\n oids.extend([CPU + str(pid), MEM + str(pid)])\n else:\n oids.extend([AS400CPU + str(pid)])\n if oids:\n singleOids = set()\n results = {}\n oidsToTest = oids\n chunkSize = self._maxOidsPerRequest\n while oidsToTest:\n for oidChunk in chunk(oidsToTest, chunkSize):\n try:\n log.debug(\"%s fetching oid(s) %s\" % (self._devId, oidChunk))\n result = yield self._get(oidChunk)\n results.update(result)\n except (error.TimeoutError, Snmpv3Error) as e:\n log.debug(\"error reading oid(s) %s - %s\", oidChunk, e)\n singleOids.update(oidChunk)\n oidsToTest = []\n if singleOids and chunkSize > 1:\n chunkSize = 1\n log.debug(\"running oids for %s in single mode %s\" % (self._devId, singleOids))\n oidsToTest = list(singleOids)\n self._storePerfStats(results)",
"def test_cpu_processor_count_value(self):\n \n cpu_processor_count = get_cpu_information()[4] \n \n # Check to make sure the returned value is \"Intel(R) Core(TM) i7-4771 CPU @ 3.50GHz\"\n self.assertEqual(cpu_processor_count, 1)",
"def _get_cpu_name_count_frequency() -> Tuple[str, int, int]:\n cpu_info = get_cpu_info()\n frequency, what = cpu_info[\"hz_actual\"]\n assert what == 0, \"I have no idea what this is, but it seem to always be 0...\"\n return cpu_info[\"brand_raw\"], cpu_info[\"count\"], frequency",
"def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))",
"def get_cpu(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the CPU Usage Statistics\",\n \"/statistics/systems/cpu.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)",
"def cpu_freq(self):\n self.monitoring_object['cpu_freq'] = \\\n psutil.cpu_freq(percpu=True)",
"def get_cpu_usage():\n cpuInfo1 = read_cpu_usage()\n if not cpuInfo1:\n return None\n\n time.sleep(2)\n\n cpuInfo2 = read_cpu_usage()\n if not cpuInfo2:\n return None\n\n cpuUsage = OrderedDict()\n\n for key in cpuInfo1.keys():\n cpustr1 = cpuInfo1[key]\n cpustr2 = cpuInfo2[key]\n\n if len(cpustr1) >= 7 and len(cpustr2) >= 7:\n\n totalCPUTime1 = long(cpustr1[1]) + long(cpustr1[2]) + long(cpustr1[3]) + long(cpustr1[4]) + long(cpustr1[5]) + long(cpustr1[6]) + long(\n cpustr1[7])\n usedCPUTime1 = long(cpustr1[1]) + long(cpustr1[2]) + long(cpustr1[3])\n\n totalCPUTime2 = float(cpustr2[1]) + long(cpustr2[2]) + long(cpustr2[3]) + long(cpustr2[4]) + long(cpustr2[5]) + long(cpustr2[6]) + long(\n cpustr2[7])\n usedCPUTime2 = float(cpustr2[1]) + long(cpustr2[2]) + long(cpustr2[3])\n\n cpuPct = round((usedCPUTime2 - usedCPUTime1) * 100 / (totalCPUTime2 - totalCPUTime1), 2)\n cpuUsage[key] = cpuPct\n\n return cpuUsage"
] |
[
"0.6275917",
"0.6085181",
"0.60578823",
"0.60562146",
"0.5991525",
"0.59678113",
"0.59386593",
"0.59386593",
"0.58819187",
"0.58737224",
"0.5819669",
"0.58091515",
"0.5777356",
"0.5775983",
"0.5764349",
"0.5755621",
"0.5755621",
"0.5755621",
"0.5755621",
"0.5755621",
"0.5755621",
"0.5755621",
"0.5750998",
"0.56999606",
"0.5697447",
"0.5696046",
"0.565788",
"0.56558955",
"0.5651036",
"0.56144226"
] |
0.6511984
|
0
|
Replaces u'celsius' with 'fahrenheit' in the provided string.
|
def replace_celcius(string):
return string.replace(u'celsius', u'fahrenheit')
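# Minimal usage check for the helper above (the sample string is made up for illustration):
assert replace_celcius(u'set the scale to celsius') == u'set the scale to fahrenheit'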
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def convert_celsius_to_fahrenheit(celsius):\n return celsius * 9.0 / 5 + 32",
"def _celsius_to_fahrenheit(self) -> None:\n if self.units == \"celsius\":\n self.value = (((self.value / 5) * 9) + 32).__round__(2)\n self.units = \"fahrenheit\"\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'celsius' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)",
"def translate_from_farenheit_to_celsius(farenheit: float) -> float:\n return (farenheit - 32) * 5./9.",
"def to_fahrenheit(celsius):\n\n return (1.8*celsius) + 32",
"def celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius * (9.0/5.0)) + 32.0\n return fahrenheit",
"def fahrenheit(celsius):\n return 9 / 5 * celsius + 32",
"def convert_to_fahrenheit(self):\n try:\n self.root.ids.celsius_input.hint_text = 'Enter amount in Celsius'\n self.root.ids.fahrenheit_input.text = '{:.2f}'.format(float(self.root.ids.celsius_input.text)\n * 9.0 / 5 + 32)\n except ValueError:\n self.root.ids.celsius_input.text = ''\n self.root.ids.celsius_input.hint_text = 'Invalid number'",
"def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"",
"def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9",
"def convert_to_celsius(self):\n try:\n self.root.ids.fahrenheit_input.hint_text = 'Enter amount in Fahrenheit'\n self.root.ids.celsius_input.text = '{:.2f}'.format((float(self.root.ids.fahrenheit_input.text) - 32)\n * 5 / 9)\n except ValueError:\n self.root.ids.fahrenheit_input.text = ''\n self.root.ids.fahrenheit_input.hint_text = 'Invalid number'",
"def replace_string(s):\n ret = s\n ret = ret.replace('<->', 'ב')\n ret = ret.replace('->', 'א')\n ret = ret.replace('-&', 'ג')\n ret = ret.replace('-|', 'ד')\n ret = ret.replace('?:', 'ה')\n return ret",
"def replace_greek_uni(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_spelled_out, greek_uni)\n return s",
"def __call__(self, ustring):\n ustring = unicode(ustring)\n return self.charef_rex.sub(self._replacer, ustring)",
"def replace_umlauts(string):\n result = string.lower()\n result = result.replace('ß', 'ss')\n result = result.replace('ä', 'ae')\n result = result.replace('ö', 'oe')\n result = result.replace('ü', 'ue')\n return result",
"def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius",
"def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius",
"def replace_greek_latin(s):\n for greek_spelled_out, latin in greek_to_latin.items():\n s = s.replace(greek_spelled_out, latin)\n return s",
"def normalize_teh_marbuta_safebw(s):\n\n return s.replace(u'p', u'h')",
"def fahrenheit(celsius):\n return ((celsius/5)*9)+32",
"def fahrenheit(T_in_celsius):\n return (T_in_celsius * 9 / 5) + 32",
"def clean(str):\n str = str.replace(u\"“\",u\"``\")\n str = str.replace(u\"”\",u\"''\")\n str = str.replace(u' \"',u\" ``\")\n str = str.replace(u'\"',u\"''\")\n str = str.replace(u'fi',u\"fi\")\n str = str.replace(u'fl',u\"fl\")\n str = str.replace(u'’',u\"'\")\n str = str.replace(u'–',u\"---\")\n str = str.replace(u'&',u\"\\\\&\")\n str = str.replace(u'#',u\"\\\\#\")\n str = str.replace(u'_',u\"\\\\_\")\n \n return str",
"def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9",
"def en_to_fa(string):\n digits_map = {\n \"0\": \"۰\",\n \"1\": \"۱\",\n \"2\": \"۲\",\n \"3\": \"۳\",\n \"4\": \"۴\",\n \"5\": \"۵\",\n \"6\": \"۶\",\n \"7\": \"۷\",\n \"8\": \"۸\",\n \"9\": \"۹\",\n }\n\n if PY2:\n if isinstance(string, unicode):\n digits_map = {unicode(e, \"utf8\"): unicode(f, \"utf8\") for e, f in digits_map.iteritems()}\n\n return utils.replace(string, digits_map)",
"def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)",
"def _replace_we(replace_we, string):\n new_string = \"\"\n for word in string.split():\n if word == \"we\" and replace_we is not None:\n new_string += replace_we + \" \"\n elif word == \"We\" and replace_we is not None:\n new_string += replace_we.capitalize() + \" \"\n else:\n new_string += str(word) + \" \"\n return new_string",
"def replace_greek_spelled_out(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_uni, greek_spelled_out)\n return s",
"def fixstring(str):\n\tstr = str.replace(u\"“\",u'\"').replace(u\"’\",u\"'\").replace(u\"â€\",u'\"')\n\tstr = cf.convert_entities(str)\n\tstr = cf.convert_unicode_u(str)\n\tstr = html_to_segments(str)\n\treturn str.strip()",
"def cleanse_string(cls, string: str) -> str:\n\n for colour in COLOURS:\n string = string.replace(colour, \"ginger\")\n\n return string.capitalize()",
"def __normalize_string(self, string):\n\n if self._dia & self._DIA_PRE93:\n string = string.replace(u\"Â\", u\"Î\")\n string = string.replace(u\"ROMÎNĂ\", u\"ROMÂNĂ\")\n elif self._dia & self._DIA_POST93:\n string = string.replace(u\"Î\", u\"Â\")\n string = string.replace(u\"Â \", u\"Î\")\n\n if self._dia & self._DIA_CEDILLA:\n string = string.replace(u\"Ș\", u\"Ş\")\n string = string.replace(u\"Ț\", u\"Ţ\")\n elif self._dia & self._DIA_COMMA:\n string = string.replace(u\"Ş\", u\"Ș\")\n string = string.replace(u\"Ţ\", u\"Ț\")\n\n if self._dia & self._DIA_NONE:\n string = string.replace(u\"Î\", u\"I\")\n string = string.replace(u\"Â\", u\"A\")\n string = string.replace(u\"Ă\", u\"A\")\n string = string.replace(u\"Ș\", u\"S\")\n string = string.replace(u\"Ț\", u\"T\")\n\n return string",
"def normalize_teh_marbuta_hsb(s):\n\n return s.replace(u'\\u0127', u'h')"
] |
[
"0.66244715",
"0.6384214",
"0.6268346",
"0.6167058",
"0.61540127",
"0.61191595",
"0.6103219",
"0.60855544",
"0.6050967",
"0.60101855",
"0.5962116",
"0.59152514",
"0.5893281",
"0.58617944",
"0.5766658",
"0.5766658",
"0.5739406",
"0.5734028",
"0.57056034",
"0.5679949",
"0.5635986",
"0.5594281",
"0.5554242",
"0.55424",
"0.5501126",
"0.54015553",
"0.5391205",
"0.5372377",
"0.533055",
"0.5305285"
] |
0.8532337
|
0
|
scale is an integer index that maps to an exponent applied to entSensorValue. The sensor_exponent values come from the Cisco MIB definitions, and I want to move those out of the plugin at some point.
|
def _entity_sensor_scale_to_exponent(sensor_scale):
sensor_exponent = [u'-24', u'-21', u'-18', u'-15', u'-12', u'-9', u'-6', u'-3', u'0', u'3',
u'6', u'9', u'12', u'15', u'18', u'21', u'24']
return int(sensor_exponent[sensor_scale - 1])
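# Illustrative sketch (not from the original plugin): a typical way to combine a raw
# entSensorValue reading with the exponent returned above. The precision handling here
# is an assumption to be checked against the MIB in use.
def _scaled_sensor_value(raw_value, sensor_scale, sensor_precision=0):
    exponent = _entity_sensor_scale_to_exponent(sensor_scale)
    # raw_value carries sensor_precision implied decimal places, then the scale exponent applies
    return (raw_value / (10 ** sensor_precision)) * (10 ** exponent)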
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_scale():\r\n\r\n \r\n return 0.5",
"def scale(self):",
"def GetScale(self):\n ...",
"def xscale(value):\n impl.xscale(**locals())",
"def scale(self):\n return self._scale",
"def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)",
"def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self",
"def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")",
"def plate_scale(platescale):\n if platescale.unit.is_equivalent(si.arcsec / si.m):\n platescale_val = platescale.to_value(si.radian / si.m)\n elif platescale.unit.is_equivalent(si.m / si.arcsec):\n platescale_val = (1 / platescale).to_value(si.radian / si.m)\n else:\n raise UnitsError(\"The pixel scale must be in angle/distance or distance/angle\")\n\n return Equivalency(\n [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],\n \"plate_scale\",\n {\"platescale\": platescale},\n )",
"def scale(self, axis, value):\r\n assert (axis < EventStream.numAxes), \"Axis number out of range\"\r\n if self.absInfo[axis]:\r\n return self.absInfo[axis].scale(value)\r\n else:\r\n return value",
"def __init__(self,scale):\n self.scale = scale",
"def scale(self, scale):\n\n self._scale = scale",
"def scale(self, scale):\n\n self._scale = scale",
"def scale(self):\n return self.distribution.scale",
"def scale(self, scale):\n\t\tself._current_score *= scale",
"def spaxel_scale(scale=4, wave=1.0):\n\n scale_rad = scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wave * 1e-6)\n print(rho)",
"def scale(self):\n return self._gev_bijector.scale",
"def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value",
"def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0",
"def get_scaled_value(self, value):\r\n raise NotImplementedError()",
"def get_scale(scale='major', key=60):\n SCALE_DICT = get_keys()\n notes = [key] + [(key + i) for i in np.cumsum(SCALE_DICT[scale])]\n return notes",
"def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret",
"def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)",
"def any_scale(scale):\n return scale",
"def scale_value(self):\n return self._scale_value[2]",
"def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale",
"def get_scale_op(self):\n\t\treturn self.variables.get('scale')",
"def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale"
] |
[
"0.69460976",
"0.68487644",
"0.67950284",
"0.6772022",
"0.6668573",
"0.6627095",
"0.64557767",
"0.64412534",
"0.64349043",
"0.6386471",
"0.6369453",
"0.6347679",
"0.6347679",
"0.6338465",
"0.63258296",
"0.63189137",
"0.62955534",
"0.6264059",
"0.62518895",
"0.623797",
"0.62267363",
"0.6220191",
"0.62154436",
"0.620296",
"0.6190978",
"0.61840284",
"0.61567557",
"0.6156554",
"0.6149482",
"0.61430204"
] |
0.75308496
|
0
|
Pulls the entPhysicalClass tree. We're interested in value '6' (Power Supply Units). Note that this could be expanded if there's a need.
|
def _power_supplies(self):
power_supplies = {}
varbinds = self._snmp_connection.bulk_walk(entPhysicalClass)
for varbind in varbinds:
if varbind.value == u'6':
psu_id = int(varbind.index)
power_supplies[psu_id] = {u'psu_id': psu_id, u'psu_name': self._entity_physical_names[psu_id]}
"""
Note that this will return a power state for the device as a whole, which we ignore because we care about
supplies
"""
varbinds = self._snmp_connection.bulk_walk(cefcFRUPowerOperStatus)
for varbind in varbinds:
psu_id = int(varbind.index)
if psu_id in power_supplies:
power_supplies[psu_id][u'psu_state'] = int(varbind.value)
if not len(power_supplies):
self._logger.warn(
u'Failed to get power enrichments on device "%s" with model "%s"' %
(self._device_fqdn, self._cisco_model))
return power_supplies
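# Hedged helper sketch (not in the original plugin): CISCO-ENTITY-FRU-CONTROL-MIB reports
# cefcFRUPowerOperStatus 'on' as 2, so a simple health filter over the dict returned above
# could look like this. Treat the status mapping as an assumption to verify against the MIB.
def _unhealthy_power_supplies(power_supplies):
    return {psu_id: psu for psu_id, psu in power_supplies.items()
            if psu.get(u'psu_state') != 2}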
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def units(self):\n return self.children()",
"def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]",
"def get_items(self):\n\n self.__logger.info(\"Thermo Builder Started\")\n\n # All relevant materials that have been updated since thermo props were last calculated\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.thermo))\n comps = [m[\"elements\"] for m in self.materials().find(q, {\"elements\": 1})]\n\n self.__logger.info(\"Found {} compositions with new/updated materials\".format(len(comps)))\n\n # Only yields maximal super sets: e.g. if [\"A\",\"B\"] and [\"A\"] are both in the list, will only yield [\"A\",\"B\"]\n # as this will calculate thermo props for all [\"A\"] compounds\n processed = set()\n # Start with the largest set to ensure we don\"t miss superset/subset relations\n for chemsys in sorted(comps, key=lambda x: len(x), reverse=True):\n if \"-\".join(sorted(chemsys)) not in processed:\n processed |= self.chemsys_permutations(chemsys)\n yield self.get_entries(chemsys)",
"def units(inventory):\n return inventory.reduce(convert.get_units)",
"def get_inventory(self, node):",
"def _get_items(self, game, raw_json=False):\n\n url = \"http://api.steampowered.com/IEconItems_{}/GetSchema/v0001/?key={}\".format(game, self.api_key)\n json_data = self._get_json(url)\n\n if raw_json:\n return json_data['result']['items']\n\n all_items = {}\n for item in json_data[\"result\"][\"items\"]:\n values = {}\n values['defindex'] = item.get('defindex')\n values['item_class'] = item.get('item_class')\n values['item_type_name'] = item.get('item_type_name')\n values['proper_name'] = item.get('proper_name')\n values['item_slot'] = item.get('item_slot')\n values['item_quality'] = item.get('item_quality')\n values['image_url'] = item.get('image_url')\n values['image_url_large'] = item.get('image_url_large')\n values['craft_class'] = item.get('craft_class')\n\n if item.get('capabilities') is not None:\n capable = item.get('capabilities')\n capabilities = {}\n capabilities['nameable'] = capable.get('nameable')\n capabilities['can_gift_wrap'] = capable.get('can_gift_wrap')\n capabilities['can_craft_mark'] = capable.get('can_craft_mark')\n capabilities['can_be_restored'] = capable.get('can_be_restored')\n capabilities['strange_parts'] = capable.get('strange_parts')\n capabilities['can_card_upgrade'] = capable.get('can_card_upgrade')\n values['capabilities'] = capabilities\n if item.get('used_by_classes') is not None:\n game_classes = []\n for character_class in item.get('used_by_classes'):\n game_classes.append(character_class)\n values['used_by_classes'] = game_classes\n\n all_items[item.get('name')] = values\n\n return all_items",
"def populate_equipment(xml_root, unit_obj):\n\n equipment_elements = xml_root.xpath('baseloadout/equipment')\n for equip_e in equipment_elements:\n e_type = equip_e.xpath('type')[0].text\n e_name = equip_e.xpath('name')[0].text\n\n if e_type in ['energy', 'ballistic', 'missile']:\n add_weapon(equip_e, unit_obj)\n elif 'Anti-Missile' in e_name and '@' not in e_name:\n # These are of type equipment, but BTMux handles them like weapons.\n add_weapon(equip_e, unit_obj)\n elif e_type == 'ammunition':\n add_ammo(equip_e, unit_obj)\n elif e_type == 'physical':\n _add_equipment(equip_e, unit_obj, PHYSICAL_WEAPON_MAP)\n elif e_type in ['equipment', 'CASE', 'TargetingComputer']:\n _add_equipment(equip_e, unit_obj, EQUIPMENT_MAP)\n else:\n raise ValueError(\"Invalid equipment type: %s\" % e_type)",
"def get_top_weapons(self):\n try:\n top_weapon = self.page.find(\"div\", {\"class\": \"weapon\"}).text.split(\" \")\n\n top_weapon_informations = {\"Weapon\": top_weapon[1], \"Headshots\": top_weapon[-5],\"Bodyshot\": top_weapon[-4],\n \"Leg_shot\":top_weapon[-3], \"Kills\": top_weapon[-1]}\n\n return top_weapon_informations\n\n except Exception as e:\n print(f\"Error: {e}. Make sure if it's the correct nickname and tag. Otherwise, it might be about your account visibility, check if it's public.\")\n print(\"Otherwise, it might be about your account visibility, check if it's public. Read : https://github.com/HicaroD/Valorant-Stats\\n\")",
"def get(self, item_id, class_id):\n return get_item_info_with_spell(item_id, class_id)",
"def get_all_items(unit) -> list:\n items = []\n for item in unit.items:\n if item.multi_item:\n for subitem in item.subitems:\n items.append(subitem)\n else:\n items.append(item)\n return items",
"def get_items(self):\n self.logger.info(\"Electrode Builder Started\")\n\n # All updated chemical systems that contain at least one redox active elements\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.thermo))\n q.update({\"chemsys\": {\"$in\": self.redox_els}})\n\n comps = self.materials.distinct(\"chemsys\", q)\n\n self.logger.info(\n \"Found {} compositions with new/updated materials\".format(len(comps)))\n\n # Only yields maximal super sets: e.g. if [\"A\",\"B\"] and [\"A\"] are both in the list, will only yield [\"A\",\"B\"]\n # as this will calculate thermo props for all [\"A\"] compounds\n processed = set()\n\n # Start with the largest set to ensure we don\"t miss superset/subset\n # relations\n for chemsys in sorted(comps, key=lambda x: len(x.split(\"-\")), reverse=True):\n if chemsys not in processed:\n processed |= chemsys_permutations(chemsys)\n yield self.get_entries(chemsys)",
"def getCPUandPGandSlot(itemsIter):\n items = []\n\n for item in itemsIter:\n cpu = pg = None\n try:\n cpu = TypeAttributes.objects.get(typeID=item, attributeID=ATTRIBUTE_CPU)\n pg = TypeAttributes.objects.get(typeID=item, attributeID=ATTRIBUTE_PG)\n except TypeAttributes.DoesNotExist:\n pass\n\n classes = []\n if item.slot == Item.HIGH:\n classes.append(\"high\")\n if item.slot == Item.MED:\n classes.append(\"med\")\n if item.slot == Item.LOW:\n classes.append(\"low\")\n if item.slot == Item.RIG:\n classes.append(\"rig\")\n if item.slot == Item.SUB:\n classes.append(\"sub\")\n if item.hardpoint == Item.MISSILE:\n classes.append(\"missile\")\n if item.hardpoint == Item.TURRET:\n classes.append(\"turret\")\n\n items.append((item, cpu, pg, \" \".join(classes)))\n\n return items",
"def test_get_systems_expanded(self):\n pass",
"def get_item(self, usage_key, depth=0):\r\n store = self._get_modulestore_for_courseid(usage_key.course_key)\r\n return store.get_item(usage_key, depth)",
"def products(self):\r\n return products.Products(self)",
"def get_cpu_units(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuUnits', self.handle)",
"def products(self):\r\n return self._products",
"def get_item_subclass(self, region, namespace, class_id, subclass_id, **filters):\n filters['namespace'] = namespace\n params = [class_id, subclass_id]\n resource = 'data/wow/item-class/{0}/item-subclass/{1}'\n return self.get_resource(resource, region, *params, **filters)",
"def get_products(classification):\n call = build_call('attr', classification)\n return request_data(call)",
"def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)",
"def unit_classes(self):\n return self._unit_classes",
"def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a",
"def GetEquipmentClass(typename):\n p_match = EQUIPMENT_CLASS_REGEX.match(typename)\n if p_match:\n return p_match.group(2)\n return None",
"def generate_receipt_instructions(self):\r\n instruction_set = set([]) # heh. not ia32 or alpha or sparc\r\n instruction_dict = {}\r\n order_items = OrderItem.objects.filter(order=self).select_subclasses()\r\n for item in order_items:\r\n item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()\r\n instruction_dict[item_pk_with_subclass] = set_of_html\r\n instruction_set.update(set_of_html)\r\n return instruction_dict, instruction_set",
"def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200",
"def get_devices(self):\n data = {\n \"device_id\": self.uuid,\n \"cmd\": \"get_account_units\",\n \"account_token\": self.api_token\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(\"{}/box_pin\".format(self.BASE_URL),\n data=json.dumps(data),\n headers=headers)\n response_json = response.json()\n if not response_json.get(\"success\"):\n raise ValueError(response_json.get(\"error_message\"))\n\n units_json = response_json.get(\"units\")\n devices = []\n for unit in units_json:\n device = Charger(unit, self)\n device.update_state()\n devices.append(device)\n\n return devices",
"async def top_10_class(self):\r\n players = await self.get_players()\r\n classes = []\r\n # Add all players\r\n for player in players:\r\n classes.append(int(player['classId']))\r\n del classes[10:]\r\n await self.bot.send_message('Top 10 3v3 Composition:')\r\n for xvar in range(1, 13):\r\n if players.count(xvar) > 0:\r\n await self.bot.send_message('{:s}: {:d}'.format(self.classes[xvar - 1], classes.count(xvar)))",
"def _get_units(self, unit_tag):\n\n # a list that contains apartment unit's information\n unit = []\n # use a loop to list all the cells in a row \n for cell in unit_tag.find_all('td'):\n if cell.attrs: # omit the cell with nothing in it \n # look for the apartment #, however, this info is not\n # consistent across the entire webiste\n if cell['data-tid'] == 'pdpfloorplans-unit-displayText':\n unit_num = cell.get_text()\n unit.append(unit_num)\n # scrape the price of the unit\n if cell['data-tid'] == 'pdpfloorplans-unit-price':\n try:\n unit_price = cell.get_text().replace('$', '')\n # try to convert the price to float \n unit.append(float(unit_price))\n except:\n # if there's no price for this unit\n # append the list with a null value \n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-bedbath':\n try:\n # try to extract the tags that include the number\n # of bedrooms and bathrooms \n bedbath_tag = cell.find_all('span')\n bed_tag, bath_tag = bedbath_tag[0], bedbath_tag[1]\n # regular expression pattern for extracting any types\n # of numbers, including integer and floating numbers \n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n bed = re.findall(pattern, bed_tag.get_text())\n bath = re.findall(pattern, bath_tag.get_text())\n bed_unit, bath_unit = 0, 0\n if bed:\n bed_unit = bed[0]\n if bath:\n bath_unit = bath[0]\n unit.append(float(bed_unit))\n unit.append(float(bath_unit))\n except:\n # if the convertion failed, append the list\n # will two null values \n unit.append(np.nan)\n unit.append(np.nan)\n if cell['data-tid'] == 'pdpfloorplans-unit-sqft':\n # follow the same procedure as above, but this time\n # scrape the square foot of the apartment unit\n try:\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n sqft_unit = re.findall(pattern, cell.get_text())[0]\n unit.append(float(sqft_unit))\n except:\n unit.append(np.nan)\n return unit",
"def build_chest_item(self):\n\t\tif not self.contents_data: return None\n\t\tconstructor = self.contents_data[ ITEM_CLASS ]\n\t\tkey = self.contents_data[ ITEM_KEY ]\n\t\treturn build_item( constructor, key, 0, 0 )",
"def get_PU(self):\n return self._powerups"
] |
[
"0.4972727",
"0.49699154",
"0.48798373",
"0.4874304",
"0.47920606",
"0.4761043",
"0.475905",
"0.47154984",
"0.46501327",
"0.46423516",
"0.46099088",
"0.46000573",
"0.45994005",
"0.4547769",
"0.45468974",
"0.45461828",
"0.45072427",
"0.44990483",
"0.44976267",
"0.44847265",
"0.44700786",
"0.44363925",
"0.44322738",
"0.44302166",
"0.44206357",
"0.44114858",
"0.44005832",
"0.43976372",
"0.43946508",
"0.43853888"
] |
0.49780378
|
0
|
Maps the indices used in the Cisco Entity MIB to the module number
|
def _module_numbers(self):
module_numbers = {}
varbinds = self._snmp_connection.bulk_walk(entPhysicalParentRelPos)
for varbind in varbinds:
module_numbers[int(varbind.index)] = varbind.value
return module_numbers
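# Illustrative sketch (not from the original plugin): merging the entPhysicalIndex ->
# relative-position map returned above into per-entity enrichments. The 'entities'
# structure (index -> dict) is an assumption made only for this example.
def _attach_module_numbers(entities, module_numbers):
    for entity_index, enrichment in entities.items():
        enrichment[u'module_number'] = module_numbers.get(entity_index)
    return entities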
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_module_indexes(self, generation):\n self.species_module_index_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_index_map becomes: representative -> (species index, member index)\n for rep, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n for species_index, species in enumerate(generation.module_population.species):\n if module in species:\n self.species_module_index_map[rep] = \\\n (species_index, generation.module_population.species[species_index].members.index(module))\n break\n else:\n for spc_index, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n if spc_index < len(generation.module_population.species) and \\\n module in generation.module_population.species[spc_index]:\n\n self.species_module_index_map[spc_index] = \\\n generation.module_population.species[spc_index].members.index(module)\n\n elif Config.allow_cross_species_mappings:\n for new_species_index, species in enumerate(generation.module_population.species):\n if module in species:\n \"\"\"found module in new species\"\"\"\n self.species_module_index_map[spc_index] = \\\n (new_species_index,\n generation.module_population.species[new_species_index].members.index(module))\n break",
"def mapping_to_index(self) -> Dict[int, int]:\n if not self._atom_mappings:\n self._atom_mappings = {\n atom.GetAtomMapNum(): atom.GetIdx()\n for atom in self.rd_mol.GetAtoms()\n if atom.GetAtomMapNum()\n }\n return self._atom_mappings",
"def mapping_to_index(self) -> Dict[int, int]:\n if not self._atom_mappings:\n self._atom_mappings = {\n atom.GetAtomMapNum(): atom.GetIdx()\n for atom in self.mapped_mol.GetAtoms()\n if atom.GetAtomMapNum()\n }\n return self._atom_mappings",
"def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result",
"def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n",
"def csm_indices(csm):\r\n return csm_properties(csm)[1]",
"def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}",
"def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id",
"def index_to_mapping(self) -> Dict[int, int]:\n if not self._reverse_atom_mappings:\n self._reverse_atom_mappings = {\n index: mapping for mapping, index in self.mapping_to_index.items()\n }\n return self._reverse_atom_mappings",
"def nodeid_to_index(G):\n\n d = {node_id: i for i, node_id in enumerate(G.nodes)}\n\n return d",
"def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index",
"def _read_module_index(str_or_file):\n yaml_content = syaml.load(str_or_file)\n index = {}\n yaml_index = yaml_content[\"module_index\"]\n for dag_hash, module_properties in yaml_index.items():\n index[dag_hash] = ModuleIndexEntry(\n module_properties[\"path\"], module_properties[\"use_name\"]\n )\n return index",
"def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_LABELS')\n for name, label, meta in zip(self.name, self.label, self.meta):\n label_table = cifti2.Cifti2LabelTable()\n for key, value in label.items():\n label_table[key] = (value[0],) + tuple(value[1])\n named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), label_table)\n mim.append(named_map)\n return mim",
"def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SERIES')\n mim.series_exponent = 0\n mim.series_start = self.start\n mim.series_step = self.step\n mim.number_of_series_points = self.size\n mim.series_unit = self.unit\n return mim",
"def from_index_mapping(cls, mim):\n nbm = sum(bm.index_count for bm in mim.brain_models)\n voxel = np.full((nbm, 3), fill_value=-1, dtype=int)\n vertex = np.full(nbm, fill_value=-1, dtype=int)\n name = []\n\n nvertices = {}\n affine, shape = None, None\n for bm in mim.brain_models:\n index_end = bm.index_offset + bm.index_count\n is_surface = bm.model_type == 'CIFTI_MODEL_TYPE_SURFACE'\n name.extend([bm.brain_structure] * bm.index_count)\n if is_surface:\n vertex[bm.index_offset : index_end] = bm.vertex_indices\n nvertices[bm.brain_structure] = bm.surface_number_of_vertices\n else:\n voxel[bm.index_offset : index_end, :] = bm.voxel_indices_ijk\n if affine is None:\n shape = mim.volume.volume_dimensions\n affine = mim.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix\n return cls(name, voxel, vertex, affine, shape, nvertices)",
"def node_mapping(self):\n ...",
"def build_pixels_map(nmodules):\n\tglobal pmap\n\tassert nmodules % pconf.modules_in_row == 0\n\n\tsensor_width = pconf.modules_in_row * pconf.mod_w\n\tsensor_height = (nmodules / pconf.modules_in_row) * pconf.mod_h\n\n\t# The module index for every image pixel\n\tm_ind = np.empty((sensor_height, sensor_width), dtype=int)\n\t# The channel index for every image pixel\n\tc_ind = np.empty((sensor_height, sensor_width), dtype=int)\n\n\tfor r in range(sensor_height):\n\t\tfor c in range(sensor_width):\n\t\t\tm_ind[r, c], (mr, mc) = pconf.mod_resolver(r, c, nmodules)\n\t\t\tassert mr <= r and mc <= c\n\t\t\tc_ind[r, c] = pconf.mod_pixels[r - mr][c - mc]\n\n\tif pconf.image_fliplr:\n\t\tm_ind, c_ind = np.fliplr(m_ind), np.fliplr(c_ind)\n\tif pconf.image_flipud:\n\t\tm_ind, c_ind = np.flipud(m_ind), np.flipud(c_ind)\n\tif pconf.image_transpose:\n\t\tm_ind, c_ind = np.transpose(m_ind), np.transpose(c_ind)\n\n\tpmap = (m_ind, c_ind)",
"def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx",
"def get_family_id_to_index():\n \n family_ids = open(\n resource_filename('contextual_lenses.resources', 'pfam_family_ids.txt'),\n 'r').readlines()\n family_id_to_index = {}\n for i, family_id in enumerate(family_ids):\n family_id_to_index[family_id.replace('\\n', '')] = i\n\n return family_id_to_index",
"def map_id_to_idx(self, class_ids):\n class_idx = torch.zeros(class_ids.shape, dtype=int)\n for k, v in self.id2idx.items():\n class_idx[class_ids == k] = v\n\n class_idx = class_idx.to(device)\n return class_idx",
"def build_label_mapping(\n grouped_targeted_labels: List[Set[str]],\n nontargeted_labels: Optional[Set[str]] = None,\n) -> Dict[str, int]:\n mapping = {\n label: i + 1\n for i, label_group in enumerate(grouped_targeted_labels)\n for label in label_group\n }\n\n if nontargeted_labels:\n mapping.update({label: 0 for label in nontargeted_labels})\n\n return mapping",
"def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx",
"def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]",
"def InterfaceIndex(self) -> int:",
"def InterfaceIndex(self) -> int:",
"def set_indices(self, part_instance_counts):\n type_indices = {}\n for entry in self._entries:\n try:\n entry.set_indices(\n model_type_index=type_indices.setdefault(entry.ENTRY_SUBTYPE, 0),\n instance_count=part_instance_counts.get(entry.name, 0),\n )\n except KeyError as e:\n raise SoulstructError(\n f\"Invalid map component name for {entry.ENTRY_SUBTYPE.name} model {entry.name}: {e}\"\n )\n else:\n type_indices[entry.ENTRY_SUBTYPE] += 1",
"def _get_idx_maps(self, types, initial_mapping=None):\n initial_mapping = constants.INITIAL_MAPPING if initial_mapping is None else initial_mapping\n # generate type to index mappings\n self.type_to_idx['word'] = Preprocessor.type_to_idx(types['word'], initial_mapping['word'])\n self.type_to_idx['char'] = Preprocessor.type_to_idx(types['char'], initial_mapping['word'])\n self.type_to_idx['tag'] = Preprocessor.type_to_idx(types['tag'], initial_mapping['tag'])",
"def indices_get_mapping(es):\n index = 'customer'\n\n print(es.indices.get_mapping(index=index))",
"def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}",
"def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}"
] |
[
"0.6726213",
"0.63144815",
"0.62095433",
"0.6148621",
"0.60828507",
"0.5993135",
"0.5982882",
"0.59800065",
"0.58837277",
"0.5866135",
"0.5795918",
"0.57755977",
"0.57060426",
"0.57042277",
"0.56990916",
"0.5678817",
"0.56643337",
"0.56333447",
"0.56311905",
"0.5612962",
"0.5585627",
"0.55776435",
"0.5575251",
"0.5574201",
"0.5574201",
"0.55539644",
"0.5553877",
"0.55402786",
"0.5532342",
"0.5532342"
] |
0.6614143
|
1
|
Finds all the profiles available in the .carbonblack credentials.response files.
|
def _find_cb_profiles():
dir_locations = [".carbonblack", os.path.join(os.path.expanduser("~"), ".carbonblack")]
cred_file = "credentials.response"
profiles = []
    for directory in dir_locations:
        cred_file_path = os.path.join(directory, cred_file)
_MOD_LOGGER.debug("Searching CB profiles on '%s'", cred_file_path)
if os.path.exists(cred_file_path):
_MOD_LOGGER.debug("File exists, parsing...")
config = configparser.ConfigParser(default_section="cbbackend", strict=True)
config.read(cred_file_path)
profiles += [sec_name for sec_name in config.keys() if sec_name != "cbbackend"]
if profiles:
_MOD_LOGGER.debug("Requested to read 'all' profiles. Found: %s", ",".join(profiles))
return profiles
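# Usage sketch: resolving which profiles to query. The 'requested' argument is a
# hypothetical input for this example; 'all' expands to every profile found in the
# credential files by the function above.
def _resolve_profiles(requested):
    if requested == "all":
        return _find_cb_profiles()
    return [requested]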
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def available_profiles(cls) -> List[str]:\n return list(cfg.get(\"profiles\"))",
"def list(self):\n # List is to be extended (directories should not have a trailing slash)\n paths_to_ignore = ['.DS_Store']\n\n profiles = []\n cache = ClientCache(self._conan_api.cache_folder)\n profiles_path = cache.profiles_path\n if os.path.exists(profiles_path):\n for current_directory, _, files in os.walk(profiles_path, followlinks=True):\n files = filter(lambda file: os.path.relpath(\n os.path.join(current_directory, file), profiles_path) not in paths_to_ignore, files)\n\n for filename in files:\n rel_path = os.path.relpath(os.path.join(current_directory, filename),\n profiles_path)\n profiles.append(rel_path)\n\n profiles.sort()\n return profiles",
"def get_profiles(self):\n # print(self.uir) #checkpoint\n if os.path.isdir(self.uir+\"/profiles\"):\n profiles=os.listdir(self.uir+\"/profiles\")\n # print(profiles) #checkpoint\n for profile in profiles:\n wsadmin=self.uir+\"/profiles/\"+profile+\"/bin/wsadmin.bat\"\n if os.path.isfile(wsadmin): #check for wsadmin.bat.\n self.profiles.append(self.uir+\"/profiles/\"+profile)\n\n else: print(self.uir+' Instance does not have \"profile\" folder in '+self.uir)\n return",
"def get_profiles(self):\n profiles = [['Profile name', 'GUID']]\n r = self.system_cursor.execute('{Call wtGetProfileList()}')\n for row in r.fetchall():\n profiles.append([row.PROFILE_NAME, row.PROFILE_GUID])\n return profiles",
"def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles",
"def list_profiles(self, params):\n return self.profiles",
"def get_profile_names():\n import botocore.session\n return botocore.session.get_session().full_config.get('profiles', {}).keys()",
"def list_profiles(self) -> dict:\n wsc = self.read_ws_configuration()\n out = OrderedDict()\n for name, json in wsc.profiles.items():\n out[name] = Profile(name, self.ws_data_folder / name, json)\n # Try to find current profile\n try:\n out[self.current_profile_name].is_current = True\n except Exception:\n pass\n return out",
"def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles",
"def get_creds(profile, conf):\n if profile:\n store = file.Storage(conf.get_profile(profile))\n return store.get()\n elif len(conf.list_profiles()) > 0:\n store = file.Storage(conf.get_profile(conf.list_profiles()[0]))\n return store.get()\n else:\n return None",
"def get_all_profiles(store=\"local\"):\n return {\n \"Domain Profile\": get_all_settings(profile=\"domain\", store=store),\n \"Private Profile\": get_all_settings(profile=\"private\", store=store),\n \"Public Profile\": get_all_settings(profile=\"public\", store=store),\n }",
"def profiles_names(self):\n url = get_url('profiles')\n response = self._get(url)\n raise_on_error(response)\n return response.json()",
"def profiles(self):\n return self._profiles",
"def profiles(self):\n return self._profiles",
"def profiles(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n return list(filter(lambda x: x is not None, executor.map(self.profile_details, self.profiles_names())))",
"def profile_files(profile):\n flist = os.listdir(osp.join(profile, 'startup'))\n profile_path = osp.join(osp.abspath('.'), profile)\n return [osp.join(profile_path, 'startup', x) for x in flist]",
"def import_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles",
"def get_profiles(self): #video profiles\n return self.camera_media.GetProfiles()",
"def GetAllProfiles(self):\n profiles = []\n feed_uri = self._gd_client.GetFeedUri('profiles')\n while feed_uri:\n feed = self._gd_client.GetProfilesFeed(uri=feed_uri)\n profiles.extend(feed.entry)\n feed_uri = feed.FindNextLink()\n self._profiles = profiles",
"def globalProfile():\n return context.profiles",
"def get_storage_profiles(self):\n return self.config[self.ROOT].keys()",
"def get_list(profiles_folder, logger):\n profile_list = []\n with scandir(profiles_folder) as it:\n for entry in it:\n if entry.is_file():\n filepath = profiles_folder + entry.name\n profile = json_from_file(filepath, logger)\n if profile is not None:\n try:\n profile_list.append({\"filepath\":filepath, \"name\":profile[\"name\"], \"description\":profile[\"description\"]})\n except AttributeError:\n logger.error(\"Missing attributes in \" + filepath)\n logger.error(str(profile))\n return profile_list",
"async def test_not_retrieve_profiles_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.get_provisioning_profiles(5, 'active')\n except Exception as err:\n assert err.__str__() == 'You can not invoke get_provisioning_profiles method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'",
"def get_profiles(args):\n # Use profile from cli if provided\n if args.profile and not args.update_config:\n return [args.profile]\n\n # Run config to get or set the config file\n config = configparser.ConfigParser()\n\n if os.path.isfile(SETTINGS_FILE) and not args.update_config:\n # Get profiles from config\n config.read(SETTINGS_FILE)\n else:\n # Get default profiles from user\n try:\n profiles_input = input(\n 'Please enter space separated list of profiles to use: '\n )\n except KeyboardInterrupt:\n # Avoid ugly stacktrace on ctrl-c in input\n sys.exit(1)\n # Setup config\n config.add_section('profiles')\n config.set('profiles', 'default', profiles_input)\n # Write to config\n config_file = open(SETTINGS_FILE, 'w')\n config.write(config_file)\n config_file.close()\n\n return config.get('profiles', 'default').split()",
"def get_user_profiles(self):\n print 'inside get user profiles'\n print 'self.username :' + self.username\n g = GoogleAnalyticsAPI(self.username)\n if g:\n print 'GA client exists'\n user_accounts = g.get_user_accounts()\n return user_accounts.get('items')\n else:\n print 'GA client does not exist'\n return []",
"def find_ready_profiles(project, script):\n profiles_ready = []\n for profile in Profile.objects.filter(script=script):\n errors = check_process_ready(project, profile)\n if len(errors) == 0:\n profiles_ready.append(profile)\n return profiles_ready",
"async def all(self):\n log('retrieving profile cards..')\n start = time.monotonic()\n profiles = await asyncio.gather(*list(\n map(lambda profile: self.retrieve(str(profile['id'])), await self.list())\n ))\n elapsed = \"%0.2fs\" % (time.monotonic() - start,)\n log(\"retrieved {} profile cards in {}\".format(len(profiles), elapsed))\n return {\n \"hits\": len(profiles),\n \"updated\": time.time() * 1000,\n \"time\": elapsed,\n \"applicants\": list(filter(None, map(self.parse, profiles)))\n }",
"def get_services(self):\n\n services = []\n\n for p in self.config['auth_profiles']:\n services.append(self.get_service(p))\n return services",
"def get_all_profiles(self) -> List[Profile]:\n return [self.model.parse_obj(profile) for profile in self.read_records(SyncMode.full_refresh)]",
"def fetch_all(profile):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(instanceprofile, \"get\", params)\n data = utils.get_data(\"InstanceProfiles\", response)\n return data"
] |
[
"0.70810413",
"0.66425323",
"0.6593193",
"0.6515927",
"0.6484026",
"0.6467895",
"0.6452797",
"0.6367274",
"0.6340477",
"0.63084596",
"0.6248525",
"0.6154834",
"0.6113096",
"0.6113096",
"0.60523206",
"0.6013961",
"0.59873754",
"0.5964247",
"0.59622484",
"0.5932647",
"0.5903166",
"0.5892368",
"0.588452",
"0.58650446",
"0.58578384",
"0.5787706",
"0.57339257",
"0.572064",
"0.5691656",
"0.5683336"
] |
0.8225347
|
0
|
machines host1,host2,host3... Set the machines where the job will be executed, given as a list of hostnames separated by commas or spaces.
|
def do_machines(self, args):
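    # Tokenize the argument string with shlex, treating commas as separators and allowing '-' inside hostnames.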
lex_parser = shlex.shlex(args)
lex_parser.whitespace += ","
lex_parser.wordchars += "-"
machine_list = [m for m in lex_parser]
    # TODO: reject hostnames that contain spaces; they are not valid.
    if not machine_list:
        print("Invalid machine list. See the help for this command.")
return False
self.hostname_list = machine_list
    print(f"Set to run on {len(self.hostname_list)} machine(s).")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def octopus_machines(self, msg, args):\r\n self.machines.send_machines(msg, args)",
"def _GetMachineList(self):\n machines = self._experiment.remote\n # All Label.remote is a sublist of experiment.remote.\n for l in self._experiment.labels:\n for r in l.remote:\n assert r in machines\n return machines",
"def check_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n if len(machines) > 1:\n print\n print CheckStatus.summary_header()\n print \"=\" * 80\n pool = Pool(POOL_SIZE)\n results = pool.map(check, machines)\n for result in results:\n machine = session.query(PoolMachine).filter_by(id=result[0].id).first()\n check_statuses = result[1]\n print machine.hostname + \":\"\n print '-' * (len(machine.hostname) + 1)\n for check_status in check_statuses:\n check_status.machine = machine\n print check_status.summary()\n if check_status.status == CheckStatus.STATUS_FAIL:\n session.add(check_status.to_alert())\n session.commit()\n else:\n print\n print CheckStatus.summary_header()\n print \"=\" * 80\n for machine in machines:\n print machine.hostname + \":\"\n print '-' * (len(machine.hostname) + 1)\n check_statuses = machine.perform_checks()\n for check_status in check_statuses:\n print check_status.summary()\n if check_status.status == CheckStatus.STATUS_FAIL:\n session.add(check_status.to_alert())\n session.commit()",
"def save_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print \"Saving (resetting agent and canceling work) on machines:\"\n for machine in machines:\n sys.stdout.write(machine.hostname + \"...\")\n sys.stdout.flush()\n try:\n machine.cancel_work()\n machine.reset_agent()\n if not machine.online:\n machine.online = True\n except:\n machine.online = False\n session.commit()\n session.query(Alert).filter(Alert.machine_id == machine.id).filter(Alert.resolved == None).update({\"resolved\": datetime.datetime.today(), \"resolution\": \"Agent reset and work cancelled (if any).\"});\n session.commit()\n print \"Done\"",
"def set_hosts(self, host_list: t.List[str]) -> None:\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all(isinstance(host, str) for host in host_list):\n raise TypeError(\"host_list argument must be list of strings\")\n # TODO check length\n if self.batch:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_hostlist(host_list)\n\n if self.launcher == \"lsf\":\n for db in self.dbnodes:\n db.set_hosts(host_list)\n else:\n for host, db in zip(host_list, self.dbnodes):\n if isinstance(db.run_settings, AprunSettings):\n if not self.batch:\n db.run_settings.set_hostlist([host])\n else:\n db.run_settings.set_hostlist([host])\n\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for i, mpmd_runsettings in enumerate(db.run_settings.mpmd):\n mpmd_runsettings.set_hostlist(host_list[i + 1])",
"def list_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print PoolMachine.summary_header()\n print \"-\" * 80\n for machine in machines:\n print machine.summary()",
"def machines(self) -> Iterable[dto.Machine]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def createMachines():\n machines = []\n for i in range(0, num_of_machines):\n cur_machine = Machine(i)\n machines.append(cur_machine)\n return machines",
"def stop_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print \"Stopping the Agent on machines:\"\n for machine in machines:\n sys.stdout.write(machine.hostname + \"...\")\n sys.stdout.flush()\n machine.stop_agent()\n print \"Done\"",
"def getAllMachines():\n\n validHosts = (subprocess.getoutput(\"qconf -shgrpl\").split())\n machineList = []\n processedHGList = []\n readNodes = False\n for host in validHosts:\n hostMachineList = ((subprocess.getoutput(\"qconf -shgrp_tree \" + str(host))).split())\n for element in hostMachineList:\n if '@' not in element: # If it is not a HG name\n if element not in machineList:\n machineList.append(element) # Searching whole list for this node\n machineList.sort()\n return machineList",
"def all_machines():\n return sorted(MACHINES, key=str)",
"def cmd_MACHINE(self, line):\r\n config = MachineOptions(self.terminal)\r\n try:\r\n config.parseOptions(line)\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if config['list']:\r\n self.callToUserAndDisplay('list_machines', 'admin')\r\n elif config['stats']:\r\n self.callToUserAndDisplay('stats_machine', 'admin',\r\n config['stats'])\r\n elif config['containers']:\r\n self.callToUserAndDisplay('machine_containers', 'admin',\r\n config['containers'])",
"def getMachines(tasks):\n uniqueMachines = []\n for t in tasks:\n if t.machine not in uniqueMachines:\n uniqueMachines.append(t.machine)\n return uniqueMachines",
"def process_all(jobs=None, malocher_dir=\".jobs\", ssh_machines=[], ssh_username=\"pfahler\", ssh_port=22, ssh_private_key=\"/home/pfahler/.ssh/id_rsa\"):\n # repeat usernames, ports and keys if they are not provided as a list\n if not isinstance(ssh_username, list):\n ssh_username = [ssh_username for _ in ssh_machines]\n if not isinstance(ssh_port, list):\n ssh_port = [ssh_port for _ in ssh_machines]\n if not isinstance(ssh_private_key, list):\n ssh_private_key = [ssh_private_key for _ in ssh_machines]\n\n available_machines = Queue()\n remaining_jobs = Queue()\n results = Queue()\n\n def _ssh_call(args, machine=None, job=None):\n #put back machine after the ssh call and recover results\n result = async_ssh(args)\n if result == 0:\n with open(os.path.join(job, \"return.bin\"), \"rb\") as f:\n result = pickle.load(f)\n results.put((job, result))\n else:\n with open(os.path.join(job, \"DONE\"), \"w\") as f:\n f.write(\"ERROR\")\n results.put((job, None))\n available_machines.put(machine)\n\n for m in zip(ssh_machines, ssh_username, ssh_port, ssh_private_key):\n available_machines.put(m)\n if jobs is None:\n for job in os.listdir(malocher_dir):\n if not os.path.exists(os.path.join(malocher_dir, job, \"DONE\")):\n remaining_jobs.put(os.path.abspath(os.path.join(malocher_dir, job)))\n else:\n for job in jobs:\n remaining_jobs.put(job)\n\n \n threads = []\n\n while not remaining_jobs.empty():\n #TODO: Handle Interrupts to kill\n job = remaining_jobs.get(block=True)\n machine = available_machines.get(block=True)\n ssh_address, ssh_username, ssh_port, ssh_private_key = machine\n cmd_dict = {'cmd': f\"{sys.executable} -m malocher {job}\",\n 'address': ssh_address,\n 'ssh_username': ssh_username,\n 'ssh_port': ssh_port,\n 'ssh_private_key': ssh_private_key}\n thread = Thread(target=partial(_ssh_call, machine=machine, job=job), args=[cmd_dict])\n thread.daemon = True\n thread.start()\n threads.append(thread)\n \n try:\n result = results.get(block=False)\n yield result\n except Empty:\n pass\n # TODO: Timeout once in a while to yield new results.\n for thread in threads:\n #TODO: Handle Interrupts to kill\n thread.join()\n yield from iter(list(results.queue))",
"def _UpdateMachineList(self, locked_machines):\n for m in self._experiment.remote:\n if m not in locked_machines:\n self._experiment.remote.remove(m)\n\n for l in self._experiment.labels:\n for m in l.remote:\n if m not in locked_machines:\n l.remote.remove(m)",
"def delete_machines(self):\n logging.debug(\"delete_machines called\")\n \n for machine in self.machines:\n logging.warn(\"Deleting %s\" % machine)\n print \"Deleting %s\" % machine\n cs.servers.delete(self.machines[machine])",
"def create_machines(self):\n logging.debug(\"create_machines called\")\n machines = self.infra.get(\"servers\")\n \n for machine in machines.keys():\n mconf = machines[machine]\n # see if the machine is already up and running\n # print mconf\n uuid = mconf.get(\"uuid\", None) # this is a saved machine state\n machine_name = \"%s%s\" % (self.footprint_name, machine)\n new_machine = Machine(self, mconf, machine)\n if uuid:\n logging.info(\"Machine saved with ID:\" + str(mconf.get(\"uuid\")))\n new_machine.load(uuid)\n if new_machine.cloudserver:\n logging.info(\"Found running server %s\" % uuid)\n continue\n \n logging.info(\"Creating machine %s\" % machine)\n new_machine.create()\n\n self.machines[machine] = new_machine",
"def list_machines(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if verbose:\n attributes = self.engine.all_attributes()\n else:\n attributes = [\"sAMAccountName\", \"objectClass\"]\n\n self.display(self.engine.query(self.engine.COMPUTERS_FILTER(), attributes), verbose, specify_group=False)",
"def reset_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print \"Resetting Agent on machines:\"\n for machine in machines:\n sys.stdout.write(machine.hostname + \"...\")\n sys.stdout.flush()\n machine.reset_agent()\n print \"Done\"",
"def machine_store(env, machines, name):\r\n store = simpy.Store(env, len(machines))\r\n store.items = machines\r\n store.name = name\r\n \r\n return store",
"def lock_machines(ctx, config):\n # It's OK for os_type and os_version to be None here. If we're trying\n # to lock a bare metal machine, we'll take whatever is available. If\n # we want a vps, defaults will be provided by misc.get_distro and\n # misc.get_distro_version in provision.create_if_vm\n os_type = ctx.config.get(\"os_type\")\n os_version = ctx.config.get(\"os_version\")\n arch = ctx.config.get('arch')\n log.info('Locking machines...')\n assert isinstance(config[0], int), 'config[0] must be an integer'\n machine_type = config[1]\n how_many = config[0]\n # We want to make sure there are always this many machines available\n to_reserve = 5\n\n # change the status during the locking process\n report.try_push_job_info(ctx.config, dict(status='waiting'))\n\n while True:\n # get a candidate list of machines\n machines = lock.list_locks(machine_type=machine_type, up=True,\n locked=False, count=how_many + to_reserve)\n if machines is None:\n if ctx.block:\n log.error('Error listing machines, trying again')\n time.sleep(20)\n continue\n else:\n raise RuntimeError('Error listing machines')\n\n # make sure there are machines for non-automated jobs to run\n if len(machines) < to_reserve + how_many and ctx.owner.startswith('scheduled'):\n if ctx.block:\n log.info(\n 'waiting for more machines to be free (need %s + %s, have %s)...',\n to_reserve,\n how_many,\n len(machines),\n )\n time.sleep(10)\n continue\n else:\n assert 0, ('not enough machines free; need %s + %s, have %s' %\n (to_reserve, how_many, len(machines)))\n\n newly_locked = lock.lock_many(ctx, how_many, machine_type, ctx.owner,\n ctx.archive, os_type, os_version, arch)\n if not newly_locked and not isinstance(newly_locked, list):\n raise RuntimeError('Invalid parameters specified')\n if len(newly_locked) == how_many:\n vmlist = []\n for lmach in newly_locked:\n if misc.is_vm(lmach):\n vmlist.append(lmach)\n if vmlist:\n log.info('Waiting for virtual machines to come up')\n keys_dict = dict()\n loopcount = 0\n while len(keys_dict) != len(vmlist):\n loopcount += 1\n time.sleep(10)\n keys_dict = lock.ssh_keyscan(vmlist)\n log.info('virtual machine is still unavailable')\n if loopcount == 40:\n loopcount = 0\n log.info('virtual machine(s) still not up, ' +\n 'recreating unresponsive ones.')\n for guest in vmlist:\n if guest not in keys_dict.keys():\n log.info('recreating: ' + guest)\n full_name = misc.canonicalize_hostname(guest)\n provision.destroy_if_vm(ctx, full_name)\n provision.create_if_vm(ctx, full_name)\n if lock.do_update_keys(keys_dict):\n log.info(\"Error in virtual machine keys\")\n newscandict = {}\n for dkey in newly_locked.iterkeys():\n stats = lockstatus.get_status(dkey)\n newscandict[dkey] = stats['ssh_pub_key']\n ctx.config['targets'] = newscandict\n else:\n ctx.config['targets'] = newly_locked\n locked_targets = yaml.safe_dump(\n ctx.config['targets'],\n default_flow_style=False\n ).splitlines()\n log.info('\\n '.join(['Locked targets:', ] + locked_targets))\n # successfully locked machines, change status back to running\n report.try_push_job_info(ctx.config, dict(status='running'))\n break\n elif not ctx.block:\n assert 0, 'not enough machines are available'\n\n log.warn('Could not lock enough machines, waiting...')\n time.sleep(10)\n try:\n yield\n finally:\n # If both unlock_on_failure and nuke-on-error are set, don't unlock now\n # because we're just going to nuke (and unlock) later.\n unlock_on_failure = (\n ctx.config.get('unlock_on_failure', False)\n and not ctx.config.get('nuke-on-error', False)\n )\n if 
get_status(ctx.summary) == 'pass' or unlock_on_failure:\n log.info('Unlocking machines...')\n for machine in ctx.config['targets'].iterkeys():\n lock.unlock_one(ctx, machine, ctx.owner, ctx.archive)",
"def get_all_machines(self, tags=None):\n if tags:\n keys, values = tags.keys(), tags.values()\n filter_keys = map(lambda key: \"tag:\" + key, keys)\n filter_tags = dict(zip(filter_keys, values))\n res = self.conn.get_all_instances(filters=filter_tags)\n else:\n res = self.conn.get_all_instances()\n instances = [i.instances[0] for i in res]\n return [MachineDetails(inst) for inst in instances]",
"def do_add_job(self, args):\n\n if self.hostname_list is None:\n print(\"Error, no machines defined. Use the commnad 'machines'\")\n return\n if self.plugin is None:\n print(\"Error, no plugins defined. Use the 'plugin set' command\")\n return\n\n print(\"***********************************\")\n print(\"********* Job information *********\")\n print(\"***********************************\")\n print(\"Plugin: \", self.plugin.LEET_PG_NAME)\n param = self.plugin.get_plugin_parameters()\n if param is not None:\n print(\"\\tParameters:\")\n for name, value in param.items():\n print(\"\\t\", name, \"=\", value)\n print(\"***********************************\")\n print(\"Amount of machines: \", len(self.hostname_list))\n print(\"Machine list: \", \",\".join(self.hostname_list))\n print(\"***********************************\")\n print(\"The job(s) will be sent for processing.\")\n confirm = input(\"Confirm? (y/n) \")\n if confirm.strip().lower() == \"y\":\n self._leet.schedule_jobs(self.plugin, self.hostname_list)\n print(\"Job scheduled. Cleaning parameters.\")\n self.hostname_list = None\n self.plugin = None\n else:\n print(\"Job cancelled.\")",
"def virtual_machines(self, virtual_machines):\n\n self._virtual_machines = virtual_machines",
"def set_hosts(hostfile='allhosts'):\n\n remote_servers = []\n\n file = open(hostfile, 'r')\n for line in file.readlines():\n remote_servers.append(line.strip('\\r\\n'))\n\n env.hosts = remote_servers",
"def list_machines(request):\n auth_context = auth_context_from_request(request)\n cloud_id = request.matchdict['cloud']\n # SEC get filtered resources based on auth_context\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n machines = methods.filter_list_machines(auth_context, cloud_id)\n\n if cloud.machine_count != len(machines):\n try:\n tasks.update_machine_count.delay(\n auth_context.owner.id, cloud_id, len(machines))\n except Exception as e:\n log.error('Cannot update machine count for user %s: %r' %\n (auth_context.owner.id, e))\n\n return machines",
"def get_machines(self):\n\n return self._machine_manager.get_machines()",
"def set_all(self, host_names, ip_address):\n for host_name in host_names:\n self.set_one(host_name, ip_address)",
"def batchLoopExec(serverList, cmdList):\n\n for server in serverList:\n #env.hosts = [ server['host'] ]\n env.host_string = server['host']\n env.port = server['port']\n env.user = server['user']\n env.password = server['password']\n for cmd in cmdList:\n exeCmd(cmd)",
"def CreateMachines(num_instances, token, os, browser, browser_version,\n download_info, retries=0):\n logging.info('\\n'.join(['num_instances: %d', 'token: %s', 'os: %d',\n 'browser: %d', 'browser_version: %s']),\n num_instances, token, os, browser, browser_version)\n\n ec2 = ec2_manager.EC2Manager()\n user_data = simplejson.dumps({'channel': browser_version,\n 'os': OS_TO_USER_DATA[os],\n 'token': token,\n 'download_info': download_info})\n logging.info('Spawning EC2 machines.')\n # Note: All exceptions are caught here because the EC2 API could fail after\n # successfully starting a machine. Because this task is rescheduled on\n # failure, we need to make sure we don't keep spawning EC2 machines.\n try:\n bots_instances = ec2.StartAmiWithOs(\n os, count=num_instances, instance_type=DEFAULT_INSTANCE_SIZE,\n user_data=user_data)\n except Exception:\n logging.exception('Something failed when setting up the EC2 instance. '\n 'Stopping setup for this instance.')\n return\n\n logging.info('Creating the corresponding ClientMachine models.')\n new_instances = []\n for instance in bots_instances:\n new_instances.append(client_machine.ClientMachine(\n vm_service=enum.VM_SERVICE.EC2, os=os, browser=browser,\n browser_version=browser_version, client_id=instance.inst_id,\n status=enum.MACHINE_STATUS.PROVISIONED, retry_count=retries,\n token=token, download_info=download_info))\n\n db.put(new_instances)\n logging.info('Finished creating the ClientMachine models.')"
] |
[
"0.6619454",
"0.6044773",
"0.6034794",
"0.58789426",
"0.5867438",
"0.584151",
"0.5819749",
"0.5793482",
"0.5762725",
"0.57339203",
"0.5671499",
"0.5636976",
"0.5603591",
"0.5599037",
"0.5590458",
"0.55259943",
"0.55078816",
"0.5486513",
"0.5482602",
"0.54447526",
"0.5425394",
"0.5420862",
"0.53893316",
"0.5364574",
"0.5312887",
"0.5294071",
"0.522376",
"0.5217253",
"0.52038217",
"0.5178304"
] |
0.7748249
|
0
|
Shows a summary of the status of the jobs.
|
def do_status(self, args):
status = self._leet.job_status
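    # Append entries for already finished jobs to the status of the currently pending jobs.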
for job in self.finished_jobs:
        status.append({"id": job.id,
                       "hostname": job.machine.hostname,
                       "plugin": job.plugin_instance.LEET_PG_NAME,
                       "status": job.status})
if status:
pretty_jobs_status(status)
else:
print("***No jobs pending")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pretty_jobs_status(jobs):\n print(tabulate.tabulate(jobs, headers=\"keys\"))",
"def _status_summary(jobs):\n assert type(jobs) == list\n successful = 0\n pending = 0\n running = 0\n coalesced = 0\n\n for job in jobs:\n status = buildapi.query_job_status(job)\n if status == buildapi.PENDING:\n pending += 1\n if status == buildapi.RUNNING:\n running += 1\n if status == buildapi.SUCCESS:\n successful += 1\n if status == buildapi.COALESCED:\n coalesced += 1\n\n return (successful, pending, running, coalesced)",
"def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'",
"def launch_status(self):\n print(\n f\"Starting job with {len(self.fe.get_network())} jobs total. \",\n end=\"\\r\",\n )",
"def summarize(self, jobId):\n jobInfo = self.jobs[jobId]\n state = jobInfo['state']\n return 'State=%s Elapsed=%s' % (\n jobInfo['state'], jobInfo['elapsed'])",
"def statusJob(self, job):\n with self.thread_lock:\n name = job.name\n job_container = self.shared_dags[job]\n job_dag = job_container.getDAG()\n\n # If there is no timing, then the job is not finished\n if job_container.getTime():\n job_container.addCaveat('time: ' + job_container.getTime())\n if job.getResult() == False:\n self.active.remove(job)\n self.killJobs()\n return\n else:\n self.job_queue_count -= 1\n job_dag.delete_node(job)\n self.active.remove(job)\n if self.args.download_only:\n result = ' -Downloaded | '\n else:\n result = ' --Finished | '\n\n else:\n result = ' Launching | '\n\n # Format job name length field\n name_cnt = (self.term_width - len(job.name)) + 2 # 2 character buffer\n result = strftime(\"%H:%M\") + result + job.name + ' '*name_cnt\n\n # Format caveat length\n caveats = job_container.getCaveats()\n caveat_cnt = self.max_caveat_length - len(caveats)\n\n if caveats:\n result = result + caveats + ' '*caveat_cnt\n else:\n result = result + ' '*caveat_cnt\n\n remaining = job_dag.size()\n print(result, \"remaining: %-3d active: %-2d\" % (remaining, len(self.active)), [x.name for x in self.active])",
"def emit_status(self):\n next_job_count = len(self.fe.get_next_jobs())\n if next_job_count:\n emoji = \"🤔\"\n else:\n emoji = \"👌\"\n remaining = len(self.fe.get_current_network())\n\n pct = (self.total_job_count - remaining) / self.total_job_count\n print(\n f\"{emoji} ———— {next_job_count} jobs running, {remaining} remaining ({int(100*pct)}%). \",\n end=\"\\r\",\n )",
"def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text",
"def publish_summary(self, jobs):\n pass",
"def get_job_overview_string(self, mission):\n\n # get statuses\n job_status, task_status = self.get_job_status(mission)\n\n s = \"Job status: {}\".format(job_status)\n\n if job_status != \"N/A\":\n s += \"\\n\"\n s += \"Tasks status: \"\n s += \"{} active; \".format(task_status[\"active\"])\n s += \"{} running; \".format(task_status[\"running\"])\n s += \"{} succeeded; \".format(task_status[\"succeeded\"])\n s += \"{} failed;\".format(task_status[\"failed\"])\n\n return s",
"def status(self) -> str:\n return self._check_job_status()",
"def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)",
"def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))",
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... 
hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)",
"def job_status(self, job_id):\n\n response = self.batch_client.describe_jobs(jobs=[job_id])\n return response[\"jobs\"][0][\"status\"]",
"def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)",
"def status(self):\n self.scion_sh('status')",
"def status(self):\n return self.job_proto.status",
"def display_jobs(self, parsable=False, hostname=None):\n\n job_status = [\"U\", \"I\", \"R\", \"X\", \"C\", \"H\", \"E\"]\n\n def display_metric(classad, parsable):\n status = job_status[int(classad[\"JobStatus\"])]\n\n next_run_time = \"UNKNOWN\"\n deferral_time = 0\n try:\n probe_interval = int(classad['OSGRSVProbeInterval'])\n deferral_time = int(classad['EnteredCurrentStatus']) + probe_interval\n except (KeyError, TypeError, ValueError):\n try:\n deferral_time = int(classad['DeferralTime'])\n except (KeyError, TypeError, ValueError):\n pass\n\n if deferral_time:\n if parsable:\n next_run_time = strftime(\"%Y-%m-%d %H:%M:%S %Z\", time.localtime(deferral_time))\n else:\n next_run_time = strftime(\"%m-%d %H:%M\", time.localtime(deferral_time))\n\n metric = \"UNKNOWN?\"\n if \"OSGRSVMetric\" in classad:\n metric = classad[\"OSGRSVMetric\"].strip('\"')\n\n owner = classad[\"Owner\"].replace('\"', \"\")\n\n if parsable:\n output = \"%s.%s | %s | %s | %s | %s\\n\" % (classad[\"ClusterId\"], classad[\"ProcId\"],\n owner, status, next_run_time, metric)\n else:\n output = \"%5s.%-1s %-10s %-2s %-15s %-44s\\n\" % (classad[\"ClusterId\"], classad[\"ProcId\"],\n owner, status, next_run_time, metric)\n \n return metric, output\n\n\n #\n # Build a table of jobs for each host\n #\n hosts = {}\n running_metrics = {}\n classads = self.get_classads(\"OSGRSV==\\\"metrics\\\"\")\n\n if not classads:\n if parsable:\n self.rsv.echo(\"ERROR: Condor-cron is running but no RSV metrics are running\")\n else:\n self.rsv.echo(\"No metrics are running\")\n else:\n for classad in classads:\n host = \"UNKNOWN?\"\n if \"OSGRSVHost\" in classad:\n host = classad[\"OSGRSVHost\"].strip('\"')\n\n if hostname and hostname != host:\n continue\n\n if host not in hosts:\n running_metrics[host] = []\n hosts[host] = \"Hostname: %s\\n\" % host\n if not parsable:\n hosts[host] += \"%7s %-10s %-2s %-15s %-44s\\n\" % \\\n (\"ID\", \"OWNER\", \"ST\", \"NEXT RUN TIME\", \"METRIC\")\n\n (metric, text) = display_metric(classad, parsable)\n running_metrics[host].append(metric)\n hosts[host] += text\n\n # Add in any hosts that have ALL their metric missing\n for host in self.rsv.get_hosts():\n if host not in hosts:\n hosts[host] = \"Hostname: %s\\n\\tThis host has no running metrics.\\n\" % host\n running_metrics[host] = []\n\n\n self.rsv.echo(\"\") # get a newline to separate output from command\n for host in hosts:\n self.rsv.echo(hosts[host])\n\n # Determine if any metrics are enabled on this host, but not running\n missing_metrics = []\n enabled_metrics = Host.Host(host, self.rsv).get_enabled_metrics()\n for metric in enabled_metrics:\n if metric not in running_metrics[host]:\n missing_metrics.append(metric)\n\n if missing_metrics:\n if parsable:\n self.rsv.echo(\"MISSING: \" + \" | \".join(missing_metrics))\n else:\n self.rsv.echo(\"WARNING: The following metrics are enabled for this host but not running:\\n%s\\n\" %\n \" \".join(missing_metrics))\n\n \n #\n # Show the consumers also if a specific hostname was not requested\n #\n if not hostname and not parsable:\n classads = self.get_classads(\"OSGRSV==\\\"consumers\\\"\")\n running_consumers = []\n if not classads:\n self.rsv.echo(\"No consumers are running\")\n else:\n self.rsv.echo(\"%7s %-10s %-2s %-30s\" % (\"ID\", \"OWNER\", \"ST\", \"CONSUMER\"))\n\n for classad in classads:\n status = job_status[int(classad[\"JobStatus\"])]\n owner = classad[\"Owner\"].replace('\"', \"\")\n consumer = classad[\"OSGRSVUniqueName\"].replace('\"', \"\")\n running_consumers.append(consumer)\n 
self.rsv.echo(\"%5s.%-1s %-10s %-2s %-30s\" % (classad[\"ClusterId\"], classad[\"ProcId\"],\n owner, status, consumer))\n\n # Display a warning if any consumers are enabled but not running\n enabled_consumers = self.rsv.get_enabled_consumers()\n missing_consumers = []\n for consumer in enabled_consumers:\n if consumer.name not in running_consumers:\n missing_consumers.append(consumer.name)\n\n if missing_consumers:\n self.rsv.echo(\"\\nWARNING: The following consumers are enabled but not running:\\n%s\\n\" %\n \" \".join(missing_consumers))\n\n return True",
"def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info",
"def stats(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\n (self.sessionid, self.name))\n return self.server.json_body(resp)",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def display_status(self):\n time = float2str(self.scheduler.time, '10.2f')\n tx = float2str(self.tx_total, '10g')\n rx = float2str(self.rx_total, '10g')\n dup = float2str(self.dup_total, '10g')\n uniq_total = float2str(self.uniq_total, '10g')\n delivered_total = float2str(self.delivered_total, '10g')\n uniq_delivered_total = float2str(self.uniq_delivered_total, '10g')\n print(\n 'define status_l text Time:{},____TX:{},____RX:{},____DUP:{},____Delivered:{}__/__{},____Arrived:{} 14 white 0.5 0.05'\n .format(time, tx, rx, dup, uniq_delivered_total, uniq_total,\n delivered_total))",
"def print_result(job_managers: 'list[job_manager.JobManager]'):\n info(\"Number of jobs run {}.\".format(len(job_managers)))\n failed_jobs = 0 # type: int\n for job_item in job_managers:\n if job_item.status.job_state != utils.JobState.COMPLETE:\n failed_jobs += 1\n warning(\n \"job {} failed because {} : {}\".format(\n job_item.job_id,\n job_item.status.job_state,\n job_item.status.message))\n\n if failed_jobs == 0:\n info(\"All jobs ran successfully.\")\n\n else:\n info(\"Number of jobs passed {} out of {}.\".format(\n len(job_managers) - failed_jobs, len(job_managers)))",
"def print_jobs(jobs):\n if len(jobs) > 0:\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(jobs)\n else:\n print('No jobs to print out')",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def status(self, job_id: str) -> dict:\n session = self._session()\n response = session.get(self._status_url(job_id))\n if response.ok:\n fields = [\n 'status', 'message', 'progress', 'createdAt', 'updatedAt', 'request',\n 'numInputGranules'\n ]\n status_subset = {k: v for k, v in response.json().items() if k in fields}\n return {\n 'status': status_subset['status'],\n 'message': status_subset['message'],\n 'progress': status_subset['progress'],\n 'created_at': dateutil.parser.parse(status_subset['createdAt']),\n 'updated_at': dateutil.parser.parse(status_subset['updatedAt']),\n 'request': status_subset['request'],\n 'num_input_granules': int(status_subset['numInputGranules']),\n }\n else:\n response.raise_for_status()",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)"
] |
[
"0.7700928",
"0.7499587",
"0.74649817",
"0.7133787",
"0.71303564",
"0.70185477",
"0.6993843",
"0.69682664",
"0.6882302",
"0.6827677",
"0.67964417",
"0.6733884",
"0.6703268",
"0.6693207",
"0.6646178",
"0.65613914",
"0.6535507",
"0.65271467",
"0.65130925",
"0.64981896",
"0.6481402",
"0.6478024",
"0.64699256",
"0.644681",
"0.64410746",
"0.63905036",
"0.63846654",
"0.63770646",
"0.63737303",
"0.63617486"
] |
0.7609351
|
1
|
Cancel all pending jobs
|
def do_cancel_all_jobs(self, args):
self._leet.cancel_all_jobs()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _cancel_all_jobs(self):\n status = self._get_status_obj()\n s = SLURM()\n for job_id in status.job_ids:\n s.scancel(job_id)\n logger.info('Pipeline job \"{}\" cancelled.'.format(self._config.name))",
"def cancel_all_instances(self):\n if not self.is_job:\n return\n\n for job_instance in self.instances:\n job_instance.cancel()\n self.status = 'CANCELED'",
"def cancel(self):\n import googleapiclient\n\n # projects.locations.operations/cancel\n operations = self._api.projects().locations().operations()\n\n for job in self.active_jobs:\n request = operations.cancel(name=job.jobname)\n logger.debug(\"Cancelling operation {}\".format(job.jobid))\n try:\n self._retry_request(request)\n except (Exception, BaseException, googleapiclient.errors.HttpError):\n continue\n\n self.shutdown()",
"def cancel_workers(self):\n pass",
"def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)",
"def cancel_all(cls, pipeline):\n\n pipe = cls(pipeline)\n pipe._cancel_all_jobs()",
"def stop_slurm_jobs():\n for job_id in slurm_jobs_scheduled:\n logger.info(\"Canceling previously scheduled job %s...\", job_id)\n cancel_command = [\"scancel\", str(job_id)]\n print(\" \".join(shlex.quote(part) for part in cancel_command))\n run(cancel_command, check=False)",
"def cancelJob(_id):\n job = mongo.db.jobs.find_one({'_id': _id})\n tasks = mongo.db.tasks.find({'job': _id})\n for each in tasks:\n _t = ca.AsyncResult(each.get('ctid'))\n _t.revoke()\n job['status'] = 'cancelled'\n \"\"\"Set status of job to cancelled\"\"\"\n mongo.db.jobs.update({'_id': _id}, job)\n \"\"\"Bulk update tasks\"\"\"\n bulk = mongo.db.tasks.initialize_unordered_bulk_op()\n bulk.find({'job': _id, 'status': {'$ne': 'completed'}}).update({\n '$set': {\n 'status': \"cancelled\",\n 'cancelled_on': now(),\n 'slave': None,\n }})\n bulk.execute()\n\n return {'info': 'success'}",
"def scancel(self, arg):\n\n if isinstance(arg, (list, tuple)):\n for job_id in arg:\n self.scancel(job_id)\n\n elif str(arg).lower() == 'all':\n self._queue = None\n for job_id in self.queue_job_ids:\n self.scancel(job_id)\n\n elif isinstance(arg, (int, str)):\n cmd = ('scancel {}'.format(arg))\n cmd = shlex.split(cmd)\n subprocess.call(cmd)\n\n else:\n e = ('Could not cancel: {} with type {}'\n .format(arg, type(arg)))\n logger.error(e)\n raise ExecutionError(e)",
"def kill_pending(self):\n for req in self._outbox:\n if not req.Test():\n req.Cancel()\n self._outbox = []",
"def killJobs(self):\n self.worker_pool.close()\n self.status_pool.close()\n self.failure = True\n for job in self.active:\n try:\n job.killJob()\n except AttributeError:\n raise SchedulerError('killJob method is not defined')\n except: # Job already terminated\n pass\n self.job_queue_count = 0",
"def cancel_tasks(self) -> None:\n for group in self._queue.values():\n for expected_response in group.values():\n expected_response.set(None)\n self._queue = defaultdict(OrderedDict)",
"async def _exit_jobs() -> None:\n logger.info(f\"Exiting {len(tasks)} active jobs.\")\n\n for task in tasks:\n task.cancel()\n\n await asyncio.gather(*tasks, return_exceptions=True)",
"def stop(self):\n if len(self.jobs) > 0:\n cmd = [\"scancel\"]+[str(j) for j in self.jobs]\n output = subprocess.check_output(cmd).decode(\"utf-8\")\n print(output, file=sys.stderr)",
"def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"",
"def cancel(self):\n\t\treturn Job(SDK.PrlJob_Cancel(self.handle)[0])",
"def cancel_tasks(self):\n # tasks will be removed by the done callback.\n for t in self._tasks.values():\n t.cancel()",
"async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()",
"def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()",
"def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )",
"def cancel():",
"def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)",
"def cancel_all(self):\n for timer in self._timers:\n timer.Stop()",
"def cancel_job(self, job_number):\n raise NotImplementedError",
"async def cancel_tasks(self) -> None:\n if self._qos_task:\n self._qos_task.cancel()\n try:\n await self._qos_task\n except asyncio.CancelledError:\n pass\n self._qos_task = None\n if self._expire_task:\n self._expire_task.cancel()\n try:\n await self._expire_task\n except asyncio.CancelledError:\n pass\n self._expire_task = None",
"def cancel_search(job):\n job.cancel()",
"def cancel(self):\n if not self.parent_node.is_job:\n return\n\n # First perform clean operation\n self.clean()\n\n self.winstance.send_event('Cancelling job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.cancel',\n kwargs={\"name\": self.name})\n self.winstance.send_event('.. job canceled')\n result.task.wait_for_terminated()\n\n self._status = 'CANCELLED'",
"def clean_all_instances(self):\n if not self.is_job:\n return\n\n for job_instance in self.instances:\n job_instance.clean()\n self.status = 'CANCELED'",
"def cancel_async_tasks():\n tornado.ioloop.IOLoop.instance().stop()",
"def do_cancelall(self,args):\n try:\n bitstamp.cancel_all()\n except Exception as e:\n print \"Unexpected Error: %s\" % e\n self.onecmd('help cancelall')"
] |
[
"0.81436586",
"0.79760283",
"0.793128",
"0.7752383",
"0.7634517",
"0.7336057",
"0.7278366",
"0.7217065",
"0.71379155",
"0.7020462",
"0.6918962",
"0.6903983",
"0.68986064",
"0.6874834",
"0.6870926",
"0.68692666",
"0.6866242",
"0.6815571",
"0.68097174",
"0.6798725",
"0.6777318",
"0.6741624",
"0.6723938",
"0.6704299",
"0.66846013",
"0.6674979",
"0.6640759",
"0.66343147",
"0.66144204",
"0.6606886"
] |
0.83705556
|
0
|
Using the Mapzen Search API to attempt to geocode a location string
|
def geocode(location):
    geo_dict = parse_mapzen_response(fetch_mapzen_response(location))
    geo_dict['query_text'] = location
    return geo_dict
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def geocode(location):\n\n\ttxt = fetch_mapzen_response(location)\n\tmydict = parse_mapzen_response(txt)\n\tmydict['query_text'] = location\n\treturn mydict",
"def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]",
"def geocode():\n\n if \"location\" in request.vars:\n location = request.vars.location\n else:\n session.error = T(\"Need to specify a location to search for.\")\n redirect(URL(r=request, f=\"index\"))\n\n if \"service\" in request.vars:\n service = request.vars.service\n else:\n # @ToDo: service=all should be default\n service = \"google\"\n\n if service == \"google\":\n return s3base.GoogleGeocoder(location, db).get_kml()\n\n if service == \"yahoo\":\n return s3base.YahooGeocoder(location, db).get_xml()",
"def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata",
"def geocode(location_dict):\n query = \"https://nominatim.openstreetmap.org/search\"\n\n if location_dict.get(' country_code') != \" \": #ISO 3166-1alpha2 code\n query += \"countrycodes=\" + location_dict.get(' country_code')[1:] + \"&\"\n if location_dict.get(' city_name') != \" \":\n query += \"?city=\" + location_dict.get(' city_name')[1:] + \"&\"\n # if location_dict.get(\" zip_code\") != \"\":\n # query += \"postalcode=\" + location_dict(\" zip_code\")[1:] + \"&\"\n\n else:\n query += \"?q=\" + location_dict.get(\"name\").replace(\n \"-\", \" \") + \"&\" # second try?\n if location_dict.get('street_address') != \" \":\n query += \"?street=\" + location_dict.get('street_address') + \"&\"\n\n return requests.get(query + \"&format=json&limit=1\").json()",
"def geocodeLocations(locations):\n maxResults = 1\n location_query = ''\n for location in locations:\n location_query += \"&location=%s\" % encodeUrl(location)\n url = \"http://open.mapquestapi.com/geocoding/v1/batch?maxResults=%d%s\" % (maxResults, location_query)\n print url\n results = json.loads(urllib2.urlopen(url).read())\n print results\n return\n for location_result in results['results']:\n #print location_result\n if location_result['providedLocation']['location'] == location:\n latlng = location_result['locations'][0]['displayLatLng']\n return latlng\n else:\n print location_result",
"def rlis_geocode(addr_str, token):\n\n url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n params = {\n 'token': token,\n 'input': addr_str,\n 'form': 'json'\n }\n rsp = requests.get(url, params=params)\n\n if rsp.status_code != 200:\n return -1, -1, -1\n else:\n json_rsp = rsp.json()\n if json_rsp['error']:\n return -1, -1, -1\n else:\n return json_rsp['data'][0]['lat'], json_rsp['data'][0]['lng'], json_rsp['data'][0]['fullAddress']",
"def fetch_mapzen_response(location):\n\tmykey = read_mapzen_credentials()\n\tmyparams = {'text': location, 'api_key': mykey}\n\turl = \"https://search.mapzen.com/v1/search?\"\n\tresp = requests.get(url, params=myparams)\n\treturn resp.text",
"def geocode(location):\n geocoding_url = f'https://maps.googleapis.com/maps/api/geocode/json?' \\\n f'address={location}&key={_GEOCODING_KEY}'\n geocode_data = requests.get(geocoding_url).json()\n return geocode_data",
"def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']",
"def lookup_lat_lon(location_string):\n google_api_key = \"AIzaSyDAJxRxTE-ZC5M7qGN5Bg_FXwgc5e_TqdU\" \n \n\n base = \"https://maps.googleapis.com/maps/api/geocode/json?address=\"\n \n return base + location_string + \"&key=\" + google_api_key",
"def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")",
"def get_location(body, returnthis):\n m = re.search(r\"\\bsteemitworldmap\\b\\s([-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?))\\s\\blat\\b\\s([-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?))\", body)\n if m:\n try:\n latitude = m.group(1)\n longitude = m.group(5)\n geolocator = Nominatim(user_agent=\"travelfeed/0.1\")\n rawlocation = geolocator.reverse(latitude+\", \"+longitude, language=\"en\", timeout=10).raw\n address = rawlocation['address']\n state = address.get('state', None)\n if state == None: #Not every location has a state/region/... set!\n state = address.get('region', None)\n if state == None:\n state = address.get('state_district', None)\n if state == None:\n state = address.get('county', None)\n if state == None:\n state = \"\"\n country_code = str(address[\"country_code\"]).upper()\n country_object = pycountry.countries.get(alpha_2=country_code)\n try:\n country = country_object.common_name #Some countries such as Taiwan or Bolivia have a common name that is used instead of the official name\n except:\n country = country_object.name\n continent_code = pycountry_convert.country_alpha2_to_continent_code(country_code)\n if continent_code == \"AF\":\n continent = \"Africa\"\n elif continent_code == \"NA\":\n continent = \"North America\"\n elif continent_code == \"OC\":\n continent = \"Oceania\"\n elif continent_code == \"AN\":\n continent = \"Antarctica\"\n elif continent_code == \"AS\":\n continent = \"Asia\"\n elif continent_code == \"EU\":\n continent = \"Europe\"\n elif continent_code == \"SA\":\n continent = \"South America\"\n if returnthis == None:\n location = state+\", \"+country+\", \"+continent\n return location\n if returnthis == \"continentcode\":\n return continent_code\n except Exception as error:\n logger.warning(\"Could not determine location: \"+repr(error))\n return None\n else:\n return None",
"def search(self):\n return self.key.geocode(self.cleanplace)",
"def geocode(self, resource):\n # Turn the different address components into a formatted string\n search_address = \", \".join(a for a in [resource.street,\n resource.city, resource.state,\n resource.zipcode, resource.country]\n if a is not None and not\n a.isspace())\n\n # Make sure we generated something meaningful\n if search_address and search_address is not None:\n # Now query the geocoder with this formatted string\n geolocator = GoogleV3(api_key=self.api_key)\n address, (latitude, longitude) = geolocator.geocode(search_address)\n\n # Update the resource based on the returned geopy.location.Location\n if address and not address.isspace():\n resource.fulladdress = address\n\n if latitude and longitude:\n resource.latitude = latitude\n resource.longitude = longitude\n\n # FUTURE: Perform additional normalization operations based\n # on the information in Location.raw\n pass",
"def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, -1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)",
"def fetch_mapzen_response(location):\n MAPZEN_BEG = 'https://search.mapzen.com/v1/search'\n keytxt = read_mapzen_credentials()\n mapzenparams = {'api_key': keytxt, 'text': location}\n resp = requests.get(MAPZEN_BEG, params = mapzenparams)\n return resp.text",
"def google_geocode(query):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"address\" : query.encode(\"utf-8\"), \"key\" : API_KEY }\n\tf = urlopen(LOC_URL % (urlencode(d)))\n\tlocdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"results\" in locdata:\n\t\t\titem = locdata[\"results\"]\n\t\t\tif len(item) == 0:\n\t\t\t\treturn None\n\t\t\titem = item[0]\n\t\t\tll = item.get(\"geometry\", {}).get(\"location\") # lol tricky\n\t\t\tif not ll: return None\n\t\t\treturn item[\"formatted_address\"], ll[\"lat\"], ll[\"lng\"]\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), locdata.replace(\"\\n\", \" \")))",
"def geocode(self, query, exactly_one=True, timeout=None):\n params = {\n 'addr': self.format_string % query,\n }\n if self.api_key:\n params['key'] = self.api_key\n url = \"?\".join((self.api, urlencode(params)))\n logger.debug(\"%s.geocode: %s\", self.__class__.__name__, url)\n return self._parse_json(\n self._call_geocoder(url, timeout=timeout), exactly_one\n )",
"def geocode(self, request):\n latlng = request.query_params.get('latlng')\n if not latlng:\n raise RequiredParameter('latlng')\n try:\n lat = float(latlng.split(',')[0])\n lng = float(latlng.split(',')[1])\n except Exception:\n raise InvalidParameter('latlng', _('Invalid `latlng`'))\n ip = get_real_ip(request)\n location = location_controller.from_location_index(lat, lng, ip)\n return Response(location)",
"def search_from_geocode(api, geocode, **kwargs):\n tweets=api.GetSearch(geocode=geocode)\n return {\"tweets\":tweets}",
"def reverse_geocoding(lat, lng, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n params = {\n 'latlng': '{},{}'.format(lat, lng),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n geodata = parse_response(response)\n return geodata",
"def geocodeLocation(location, geocodeDB, tries=0):\n try:\n location = encodeUrl(location)\n except UnicodeDecodeError:\n pass\n\n try:\n row = geocodeDB.find_one({'query':location})\n except:\n row = None\n if row:\n print 'Cached. Times: {%d}' % row['count']\n results = row['results']\n geocodeDB.update({'_id':row['_id']}, {'$inc':{'count':1}})\n else:\n url = \"http://open.mapquestapi.com/nominatim/v1/search?format=json&addressdetails=0&bounded=1&viewbox=-123.173825,37.9298443,-122.28178,37.63983&q=%s\" % location\n print url\n try:\n results = json.loads(urllib2.urlopen(url).read())\n print results\n except (socket.timeout, socket.error):\n print 'failed to get: %s' % url\n #print 'socket.timeout so waiting a few seconds before trying again'\n if tries >= 3:\n geocodeDB.save({'query':location, 'results':None, 'count':1})\n return None\n else:\n for i in range(5):\n sys.stdout.write(' %d seconds left\\r' % (5 - i))\n sys.stdout.flush()\n time.sleep(1)\n print 'continuing...'\n return geocodeLocation(location, geocodeDB, tries+1)\n #return None\n except urllib2.URLError:\n print \"URLError\"\n return None\n geocodeDB.save({'query':location, 'results':results, 'count':1})\n if results:\n for obj in results:\n return {'lat':obj['lat'], 'lon':obj['lon']}\n else:\n print \"no results for: %s\", location\n return None",
"def geocode(address):\n\n mapsurl = ('http://maps.googleapis.com/maps/api/geocode/xml?address=' +\n address.replace(' ', '+') + '&sensor=false')\n\n coords = urllib.urlopen(mapsurl).read()\n root = etree.fromstring(coords)\n coordstr = (0, 0)\n loc = root.find(\".//location\")\n if not loc is None:\n coordstr = (loc[1].text, loc[0].text)\n return coordstr",
"def geocode(self, term):\n # TODO: permitir cofigurar isso...\n # limit string size\n s = term[:60]\n # check cache\n term_geo = self.cache.get(s)\n if not term_geo:\n term_geo = {}\n # query all servers\n for server_name, func in self.server_options.items():\n points = func(s)\n term_geo[server_name] = []\n for point in points:\n region = self.inside_limits(point)\n if region:\n if region is True:\n region = \"???\"\n print(region)\n term_geo[server_name].append({\n \"address\": point.address,\n \"latitude\": point.latitude,\n \"longitude\": point.longitude,\n \"region\": region\n })\n self.cache[s] = term_geo\n # print(\"------------------------------------\")\n # print(term_geo)\n return term_geo",
"def geocode_extract(location, maps_key):\n gmaps = googlemaps.Client(key=maps_key)\n geocode_result = gmaps.geocode(location, region='il', language='iw')\n if geocode_result is None or geocode_result == []:\n return None\n response = geocode_result[0]\n geom = response['geometry']['location']\n street = ''\n road_no = ''\n intersection = ''\n city = ''\n district = ''\n for item in response['address_components']:\n if 'route' in item['types']:\n if item['short_name'].isdigit():\n road_no = item['short_name']\n else:\n street = item['long_name']\n elif 'point_of_interest' in item['types'] or 'intersection' in item['types']:\n intersection = item['long_name']\n elif 'locality' in item['types']:\n city = item['long_name']\n elif 'administrative_area_level_1' in item['types']:\n district = item['long_name']\n address = response['formatted_address']\n return {'street': street, 'road_no': road_no, 'intersection': intersection,\n 'city': city, 'address': address,\n 'district': district, 'geom': geom}",
"def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y",
"def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue",
"def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }",
"def return_address_from_location(location='0,0'):\n if not re.compile('^(\\-?\\d+(\\.\\d+)?),\\s*(\\-?\\d+(\\.\\d+)?)$').match(location):\n raise ValueError('Location Invalid')\n base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n latlng = 'latlng=' + location\n try:\n #This try block is for our first 150,000 requests. If we exceed this, use Jack's Token.\n key_string = '&key=' + ACCESS_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n try:\n #Use Jack's Token in case of some invalid request problem with other API Token\n key_string = '&key=' + JACK_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n raise ValueError(\"Something went wrong\")"
] |
[
"0.77716124",
"0.7181079",
"0.71422786",
"0.7139193",
"0.71137244",
"0.7111963",
"0.7066725",
"0.69260824",
"0.6847181",
"0.68150973",
"0.67805934",
"0.673379",
"0.6728002",
"0.67107326",
"0.66972625",
"0.6691047",
"0.66596097",
"0.6584444",
"0.6549973",
"0.65382755",
"0.6485653",
"0.64827424",
"0.6477998",
"0.6404349",
"0.63755643",
"0.63540983",
"0.6348092",
"0.63403326",
"0.6323986",
"0.6310873"
] |
0.7777906
|
0
|
Tests predictable success & failure of different values for a specified parameter when passed to a specified function
|
def test_parameter_values(func,
var_name=None,
fail_list=[],
success_list=[],
**kwargs):
# If variable name is specified, test each value in fail_list
# and success_list
if var_name is not None:
# User feedback
print("Testing %s() parameter %s ..." % (func.__name__, var_name))
# Test parameter values that should fail
for x in fail_list:
kwargs[var_name] = x
test_for_mistake(func=func, should_fail=True, **kwargs)
# Test parameter values that should succeed
for x in success_list:
kwargs[var_name] = x
test_for_mistake(func=func, should_fail=False, **kwargs)
print("Tests passed: %d. Tests failed: %d.\n" %
(global_success_counter, global_fail_counter))
# Otherwise, make sure function without parameters succeeds
else:
# User feedback
print("Testing %s() without parameters." % func.__name__)
# Test function
test_for_mistake(func=func, should_fail=False, **kwargs)
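
For context, a minimal, self-contained usage sketch of the function above, assuming everything is defined in one scratch module. The target demo_sqrt, the module-level counters, and the simplified exception-based test_for_mistake stand-in are all illustrative assumptions; in the original codebase test_for_mistake instead inspects a mistake flag on the object returned by func.

import math

global_success_counter = 0
global_fail_counter = 0

def test_for_mistake(func, should_fail=False, **kwargs):
    # Simplified stand-in: treat a raised exception as a "failed" call.
    global global_success_counter, global_fail_counter
    try:
        func(**kwargs)
        raised = False
    except Exception:
        raised = True
    if raised == should_fail:
        global_success_counter += 1
    else:
        global_fail_counter += 1

def demo_sqrt(x):
    # Hypothetical target: accepts only non-negative numbers.
    if not isinstance(x, (int, float)) or x < 0:
        raise ValueError('x must be a non-negative number')
    return math.sqrt(x)

# Values of parameter `x` expected to be rejected vs. accepted.
test_parameter_values(func=demo_sqrt,
                      var_name='x',
                      fail_list=[-1, 'abc', None],
                      success_list=[0, 4, 2.25])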
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_correct_value(self):\n self.assertTrue(py_function(6) == 36)\n self.assertFalse(py_function(5) == 9)\n for i in range(0, 10):\n self.assertTrue(py_function(i) == i**2 if i != 0 else 100)",
"def _check_value(self, value, name, check_function):\n if check_function is not None:\n is_good = check_function(value) #May raise an exception\n assert is_good in [0,1,True,False]\n if not is_good:\n raise ValueError(\"Invalid parameter value %r for parameter %s\" \\\n % (value, name))",
"def testfn(arg):\n if arg == 42:\n raise ValueError('Oh noes')\n return arg",
"def run_one_test(expected, func):\n answer = func(expected)\n if answer == expected:\n print(\".\")\n print(\"ok\")\n else:\n print(\"F\")\n print(f\"error: expected {expected}, got {answer}\")",
"def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret",
"def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret",
"def check_output(self, function, output=None, *value):\n if output is None:\n output = None\n self.assertEqual(function(*value), output)",
"def test_function(arg):\n return arg * 2",
"def check_scalar(self, register: str, value: int):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n assert isinstance(value, int), f\"{value} is a {type(value)}, expected an int!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n exit_code = 8\n saved_register = self._parse_register(register)\n lbl = self._make_lbl(f\"{register}_eq_{value}\")\n msg = f\"msg{self._msg_count}\"\n self._msg_count += 1\n self.data += [f\"{msg}: .asciiz \\\"expected {register} to be {value} not: \\\"\"]\n self._checks += [\n \"\", f\"# check that {register} == {value}\",\n f\"li t0 {value}\", f\"beq {saved_register} t0 {lbl}\",\n \"# print error and exit\",\n f\"la a1, {msg}\", \"jal print_str\",\n f\"mv a1 {saved_register}\", \"jal print_int\",\n \"# Print newline\", \"li a1 '\\\\n'\", \"jal ra print_char\",\n f\"# exit with code {exit_code} to indicate failure\",\n f\"li a1 {exit_code}\", \"jal exit2\",\n f\"{lbl}:\", \"\"\n ]",
"def assertion_passed(self, func):",
"def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)",
"def test_function(arg_1):\n return arg_1 * 2",
"def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\",\r\n \"PSO_VELOCITY_WEIGHT\", \"PSO_INDIVIDUAL_WEIGHT\", \"PSO_GROUP_WEIGHT\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret",
"def test_err_if(\n self, predicate: t.Callable, val: t.Any, exp: Result\n ) -> None:\n assert Result.err_if(predicate, val) == exp",
"def expected(x, y):",
"def expected(x, y):",
"def expected(x, y):",
"def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False",
"def test_from_func_variational(pres, nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph, function):\n print(pres)\n _, samp_train, samp_test, labels = function(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph)\n\n if len(labels) == 2:\n print(\"Success for my implementation (second order, variational):\")\n my_impl_variational(samp_train, samp_test, labels)\n\n return 0",
"def test_task_with_one_int_validation_parameter_validate_data(number, expected_value):\r\n\r\n assert algo.TaskWithOneIntValidationParameter.validate_data(number) == expected_value",
"def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):\n print('Error testing ...')\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n try:\n function(*args,**kwargs)\n res = False\n msg = 'Function call did not error!'\n except Exception as e:\n res = checkSame('',e.args[0],errstr,update=False)\n if not res:\n msg = 'Unexpected error message. \\n Received: \"{}\"\\n Expected: \"{}\"'.format(e.args[0],errstr)\n if update:\n if res:\n results[\"pass\"] += 1\n print(' ... end Error testing (PASSED)')\n else:\n print(\"checking error\",comment,'|',msg)\n results[\"fail\"] += 1\n print(' ... end Error testing (FAILED)')\n print('')\n return res",
"def test_tolerate_decorated_function_raise_if_switch_fail():\n def test_function():\n raise AttributeError()\n def test_switch(*args, **kwargs):\n return False, args, kwargs\n fn = tolerate(switch=test_switch)(test_function)\n fn()",
"def test_function(my_data):\n return my_data == 42",
"def le_success_func(target, result):\n if result is None:\n return False\n return result <= target",
"def test(test_function, parameters, invert=False, name=None, show_output=True):\n\n global count\n\n if not name:\n name = test_function.__name__\n\n # Actual Testing\n\n result = test_function(*parameters)\n\n if invert:\n result = not result\n\n # Printing Results\n\n if show_output:\n print('Test #%d: \\'%s\\' -- ' % (count, name), end='')\n\n if result:\n print('PASS')\n else:\n print('FAIL')\n\n count += 1\n\n return result",
"def param_vals_test(param_dict):\n file_msg = param_dict['Prog_msg']\n ##\n ## Testing if `wget` exists in the system\n if is_tool('wget'):\n pass\n else:\n msg = '{0} You need to have `wget` installed in your system to run '\n msg += 'this script. You can download the entire dataset at {1}.\\n\\t\\t'\n msg += 'Exiting....'\n msg = msg.format(file_msg, param_dict['url_catl'])\n raise ValueError(msg)\n ##\n ## Checking that Esmeralda is not ran when doing 'SO' halos\n if (param_dict['halotype'] == 'so') and (param_dict['sample'] == 20):\n msg = '{0} The `halotype`==`so` and `sample`==`20` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format(file_msg)\n raise ValueError(msg)\n ##\n ## Checking that `hod_model_n` is set to zero for FoF-Halos\n if (param_dict['halotype'] == 'fof') and (param_dict['hod_n'] != 0):\n msg = '{0} The `halotype`==`{1}` and `hod_n`==`{2}` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format( file_msg,\n param_dict['halotype'],\n param_dict['hod_n'])\n raise ValueError(msg)\n ##\n ## Checking input different types of `test_train_opt`\n #\n # `sample_frac`\n if (param_dict['test_train_opt'] == 'sample_frac'):\n # `sample_frac`\n if not ((param_dict['sample_frac'] > 0) and\n (param_dict['sample_frac'] <= 1.)):\n msg = '{0} `sample_frac` ({1}) must be between (0,1]'.format(\n file_msg, param_dict['sample_frac'])\n raise ValueError(msg)\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n #\n # boxes_n\n if (param_dict['test_train_opt'] == 'boxes_n'):\n box_n_arr = num.array(param_dict['box_idx'].split('_')).astype(int)\n box_n_diff = num.diff(box_n_arr)\n # Larger than zero\n if not (all(box_n_arr >= 0)):\n msg = '{0} All values in `box_idx` ({1}) must be larger than 0!'\n msg = msg.format(file_msg, box_n_arr)\n raise ValueError(msg)\n # Difference between elements\n if not (all(box_n_diff > 0)):\n msg = '{0} The value of `box_idx` ({1}) is not valid!'.format(\n file_msg, param_dict['box_idx'])\n raise ValueError(msg)\n #\n # `box_test`\n if (param_dict['test_train_opt'] == 'box_sample_frac'):\n # Value of `box_test`\n if not (param_dict['box_test'] >= 0):\n msg = '{0} `box_test` ({1}) must be larger or equal to `0`.'\n msg = msg.format(file_msg, param_dict['box_test'])\n raise ValueError(msg)\n # Testing `test_size`\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n ##\n ## Checking that `kf_splits` is larger than `2`\n if (param_dict['kf_splits'] < 2):\n msg = '{0} The value for `kf_splits` ({1}) must be LARGER than `2`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['kf_splits'])\n raise ValueError(msg)\n ##\n ## Checking that `n_predict` is not smaller than `1`.\n if (param_dict['n_predict'] < 1):\n msg = '{0} The value for `n_predict` ({1}) must be LARGER than `1`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['n_predict'])\n raise ValueError(msg)",
"def test_for_mistake(func, *args, **kw):\n\n global global_fail_counter\n global global_success_counter\n\n # print test number\n test_num = global_fail_counter + global_success_counter\n print('Test # %d: ' % test_num, end='')\n #print('Test # %d: ' % test_num)\n\n # Run function\n obj = func(*args, **kw)\n # Increment appropriate counter\n if obj.mistake:\n global_fail_counter += 1\n else:\n global_success_counter += 1",
"def test_positive_value_exists(self):\n #######################################\n # Test for True\n value_to_test = 1\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 100\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = 'hello'\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = True\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = {\n 'success': True\n }\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n value_to_test = [\n 'success'\n ]\n self.assertEqual(positive_value_exists(value_to_test), True,\n \"Testing value: {value_to_test}, True expected\".format(value_to_test=value_to_test))\n\n #######################################\n # Test for False\n value_to_test = 0\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = -1\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = ''\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = '0'\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = False\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = {}\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))\n\n value_to_test = []\n self.assertEqual(positive_value_exists(value_to_test), False,\n \"Testing value: {value_to_test}, False expected\".format(value_to_test=value_to_test))",
"def test_Utilities__test_2():\n assert test(False, 1, False, 2, 3) == 3\n assert test(False, 1, 2) == 2\n assert test(1) == 1\n assert not test(False)",
"def func_case(self):\n test.success(\"\")"
] |
[
"0.6997643",
"0.67936534",
"0.6765097",
"0.67322314",
"0.66246575",
"0.65953",
"0.6588077",
"0.65858746",
"0.65613014",
"0.6553458",
"0.6549264",
"0.65439284",
"0.652965",
"0.646326",
"0.6396299",
"0.6396299",
"0.6396299",
"0.6383678",
"0.6357292",
"0.63565874",
"0.63524413",
"0.6331223",
"0.6317072",
"0.6276698",
"0.62293565",
"0.6227279",
"0.6224129",
"0.62104625",
"0.6208643",
"0.620665"
] |
0.7383306
|
0
|
Delete the NSR from openbaton based on user_info and the nsr
|
def release_resources(self, user_info, payload=None):
ob_client = OBClient(user_info.name)
logger.info('Deleting resources for user: {}'.format(user_info.name))
logger.debug('Received this payload: {}'.format(payload))
nsr = None
try:
nsr = json.loads(payload)
assert type(nsr) == dict
except:
logger.warning('Could not parse release resource payload to JSON: {}'.format(payload))
traceback.print_exc()
if nsr:
try:
remove_nsr_to_check(nsr.get('id'))
except:
pass
return
if nsr.get('type') == 'NfvResource' and nsr.get('properties') is not None:
logger.debug('The payload does not seem to be an NSR so the resource was probably not yet deployed and '
'nothing has to be removed from Open Baton.')
return
nsd_id = nsr.get('descriptor_reference')
try:
nsd = json.loads(ob_client.get_nsd(nsd_id))
except NfvoException:
traceback.print_exc()
return
vnfd_ids = []
for vnfd in nsd.get('vnfd'):
vnfd_ids.append(vnfd.get('id'))
try:
try_delete_nsr(nsr, ob_client)
try_delete_nsd(nsd_id, ob_client)
# TODO to be added if cascade is not enabled
# for vnfd_id in vnfd_ids:
# self.try_delete_vnfd(vnfd_id, ob_client)
except NfvResourceDeleteException as e:
traceback.print_exc()
logger.error("...ignoring...")
remove_nsr_to_check(nsr.get('id'))
logger.info("Removed resource %s" % nsr.get('name'))
# remove_all(ob_client)
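
The delete helpers try_delete_nsr and try_delete_nsd are referenced above but not shown; the retry-style sketch below illustrates one plausible shape for them. The generic delete_fn callback, the attempt count, and the delay are assumptions and not the actual Open Baton client API; only the exception type is taken from the code above.

import time

def try_delete_with_retries(delete_fn, resource_id, attempts=3, delay=5):
    # Generic sketch: call the supplied delete callback until it succeeds,
    # raising the exception type used above once the retries are exhausted.
    for attempt in range(1, attempts + 1):
        try:
            delete_fn(resource_id)
            return
        except Exception:
            if attempt == attempts:
                raise NfvResourceDeleteException(
                    'Could not delete resource {} after {} attempts'.format(resource_id, attempts))
            time.sleep(delay)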
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_user():",
"def run(self):\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)",
"def removeROAnnotation(self, rouri, annuri):\n (status, reason, headers, data) = self.doRequest(annuri,\n method=\"DELETE\")\n return (status, reason)",
"def delete(self, urns, client_cert, credentials, best_effort): ### FIX the response\n result = []\n slice_urn = urns[0]\n # try:\n for urn in urns:\n if self._verify_users:\n logger.debug(\"delete: authenticate the user for %s\" % (urn))\n client_urn, client_uuid, client_email =\\\n self.auth(client_cert, credentials, urn, (\"deletesliver\",))\n logger.info(\"Client urn=%s, uuid=%s, email=%s\" % (\n client_urn, client_uuid, client_email,))\n\n try:\n links_db, nodes, links = self.SESlices.get_link_db(urn)\n except Exception as e:\n raise geni_ex.GENIv3GeneralError(\"Slice does not exist.\")\n\n reservation_ports = self.SESlices._allocate_ports_in_slice(nodes)[\"ports\"]\n\n portsVlansPairs = getPortsVlansPairs(links_db)\n\n try:\n for portVlanItem in portsVlansPairs:\n (in_port, out_port, in_vlan, out_vlan) = portVlanItem\n se_provision.deleteSwitchingRule(in_port, out_port, in_vlan, out_vlan)\n logger.debug(\"unprovision SE-Slice-Urn=%s, in_port=%s , out_port=%s, in_vlan=%s, out_port=%s\" % (urn,in_port, out_port, in_vlan, out_vlan))\n except:\n logger.warning(\"Problem in communication with SE\")\n\n # expires_date = datetime.strptime(links_db['geni_expires'], RFC3339_FORMAT_STRING)\n expires_date = links_db['geni_expires']\n\n\n for sliver in links_db[\"geni_sliver_urn\"]:\n result.append( \n { \n \"geni_sliver_urn\": sliver,\n \"geni_expires\": expires_date,\n \"geni_allocation_status\": \"geni_unallocated\",\n \"geni_operational_status\" : \"geni_notready\"\n }\n )\n\n # Mark resources as free\n self.SEResources.free_resource_reservation(reservation_ports)\n\n # Remove reservation\n self.SESlices.remove_link_db(urn)\n \n logger.info(\"delete successfully completed: %s\", slice_urn)\n \n return result\n\n # except:\n\n # raise geni_ex.GENIv3GeneralError(\"Delete Failed. Requested resources are not available.\")",
"def delete(self, xact, path):\n self._log.debug(\"Deleting NSR xact:%s, path:%s\", xact, path)\n self.regh.delete_element(path)\n self._log.debug(\"Deleted NSR xact:%s, path:%s\", xact, path)",
"def del_usrs (conn):\n\n try:\n csr = conn.cursor()\n\n cmd = \"DELETE FROM {tbl};\".\\\n format(tbl = _tbl_users)\n print(cmd)\n\n csr.execute(cmd)\n csr.close()\n\n except Exception as ex:\n print(\"Error - del_usrs: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok",
"def delete(self, *args, **kwargs):\n\n lns_euid = None\n lgtw_euid = None\n\n if args[0]:\n try:\n lns_euid = EUI64(args[0]).id6\n except ValueError as err: \n self.set_status(400)\n self.finish({\"status_code\":400,\"title\":\"Value error (lns_euid)\",\"detail\":str(err)})\n\n if len(args) == 2:\n if args[1]:\n try:\n lgtw_euid = EUI64(args[1]).id6\n except ValueError as err: \n self.set_status(400)\n self.finish({\"status_code\":400,\"title\":\"Value error (lgtw_euid)\",\"detail\":str(err)})\n\n if len(args) == 2 and lns_euid and lgtw_euid:\n self.service.remove_lgtw(lns_euid, lns_euid)\n elif len(args) == 2 and not lns_euid and args[1]:\n self.service.remove_lgtw(lns_euid)\n elif lns_euid:\n lns_euid = lns_euid\n print(self.service.lgtws)\n for lgtw_euid in self.service.lgtws:\n self.service.remove_lgtw(lgtw_euid, lns_euid)\n else:\n for lns_euid in self.service.lnss:\n for lgtw_euid in self.service.lgtws:\n self.service.remove_lgtw_from_lns(lgtw_euid, lns_euid)",
"def delete(self, request, nnid):\n try:\n input_parm = request.data\n input_parm['nn_id'] = nnid\n return_data = NNCommonManager().delete_nn_info(input_parm)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))",
"def delete(self,user_id):\n user_status,calling_user = has_admin_privileges()\n if user_status == \"no_auth_token\":\n return (bad_request,400,headers)\n\n if user_status == \"not_logged_in\":\n return (unauthorized,401,headers)\n\n # getting the user. Assuming the user exists. Case of user not existing is checked below\n try:\n user = g.session.query(g.Base.classes.users).get(user_id)\n except Exception as err:\n print(type(err))\n print(err)\n return (internal_server_error,500,headers)\n\n # *Only Directors, Organizers and user calling the request\n if user:\n try:\n if user_status in [\"director\",\"organizer\"] or calling_user.id == user.id:\n if user.rsvps_collection:\n g.session.delete(g.session.query(g.Base.classes.rsvps).get(user.rsvps_collection[0].id))\n if user.applications_collection:\n g.session.delete(g.session.query(g.Base.classes.applications).get(user.applications_collection[0].id))\n g.session.delete(g.session.query(g.Base.classes.users).get(user_id))\n else:\n forbidden[\"error_list\"]={\"Authorization error\":\"You do not privileges to access this resource. Contact one of the organizers if you think require access.\"}\n return (forbidden,403,headers)\n except Exception as err:\n print(type(err))\n print(err)\n return (internal_server_error, 500, headers)\n else:\n return (not_found,404,headers)\n\n # error handling for mail send\n try:\n f = open(\"common/account_creation.html\",'r')\n body = Template(f.read())\n f.close()\n body = body.render(first_name = user.first_name)\n send_email(subject = \"Account creation confirmation!\",recipient = user.email, body = \"Account deleted!\")\n return (\"\",204,headers)\n except Exception as err:\n print(type(err))\n print(err)\n internal_server_error[\"error_list\"][\"error\"] = \"Account successfully created. Error in confirmation email sending.\"\n return (internal_server_error,500,headers)",
"def test_remove_nipsa_does_not_delete_user_that_does_not_exist(NipsaUser):\n NipsaUser.get_by_userid.return_value = None\n request = mock.Mock()\n\n logic.remove_nipsa(request=request, userid=\"test_id\")\n\n assert not request.db.delete.called",
"def delete_user():\n #TODO user delete\n pass",
"def remove_user_from_db(choice):\n client_detail_list = sqlite3.connect('../db/client_list.db')\n client_db = client_detail_list.cursor()\n client_db.execute(\"DELETE FROM clients WHERE nickname=?\", (choice,))\n client_detail_list.commit()\n client_detail_list.close()",
"def del_user(self, username):\n pass",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")",
"def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network",
"def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")",
"def delete(self, userinformation):\n self.db.remove(userinformation)",
"def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)",
"def delete_node(self, node_tup):\n signature = hashlib.sha256((uname+node_sig).encode('utf-8')).hexdigest() #hash value\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"DELETE FROM nodes WHERE ip==(:ip) AND port==(:port)\", { \"ip\":node_tup[1], \"port\":node_tup[2]})\n app_process.commit()\n app_process.close()",
"def delete_instigator_state(self, origin_id: str):",
"def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")",
"async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")",
"def test_delete_user_identity_mapping(self):\n pass",
"def test_destroy_nas_share_by_nas(self):\n pass",
"def remove_registrar(contest, user):\n _remove_role(contest, user, pcm.Registrar)",
"def deleteRO(self, rouri, purge=False):\n reqheaders=None\n if purge:\n reqheaders={\"Purge\": \"True\"}\n (status, reason, headers, data) = self.doRequest(rouri,\n method=\"DELETE\", reqheaders=reqheaders)\n if status in [204, 404]:\n return (status, reason)\n raise self.error(\"Error deleting RO\", \"%03d %s (%s)\"%(status, reason, str(rouri)))",
"def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")",
"def delete():",
"def do_nic_delete(cc, args):\n cc.nic.delete(args.uuid)\n print(_(\"%s deleted\" % args.uuid))"
] |
[
"0.60865736",
"0.60510623",
"0.5833243",
"0.57836574",
"0.5740733",
"0.5665142",
"0.56516165",
"0.5639647",
"0.5607864",
"0.55743295",
"0.55386347",
"0.553106",
"0.55285126",
"0.5520713",
"0.55151373",
"0.5484675",
"0.5473775",
"0.5471058",
"0.54653317",
"0.5457935",
"0.54265714",
"0.5423149",
"0.54229116",
"0.5420344",
"0.5405308",
"0.5400079",
"0.5390271",
"0.5389287",
"0.5385238",
"0.5384426"
] |
0.60889333
|
0
|
Clears the fingerprint, making the object reusable
|
def clear(self):
self._fingerprint = 0
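
A small illustrative sketch (not from the original source) of the kind of accumulator this method would belong to; the Fingerprint class and its toy rolling-hash update are assumptions made only to show how clearing makes the same instance reusable.

class Fingerprint:
    def __init__(self):
        self._fingerprint = 0

    def update(self, data: bytes) -> None:
        # Toy rolling hash; a real implementation would differ.
        for byte in data:
            self._fingerprint = (self._fingerprint * 31 + byte) & 0xFFFFFFFFFFFFFFFF

    def value(self) -> int:
        return self._fingerprint

    def clear(self):
        self._fingerprint = 0

fp = Fingerprint()
fp.update(b'first payload')
print(hex(fp.value()))
fp.clear()                 # reset, so the same object can be reused
fp.update(b'second payload')
print(hex(fp.value()))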
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clear(self) :\n self.__dict__ = {}",
"def clear(self) -> None:\n # Delete these so the .by_class/name values are cleared.\n self['classname'] = 'info_null'\n del self['targetname']\n self._keys.clear()\n # Clear $fixup as well.\n self._fixup = None",
"def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()",
"def clear(self): # real signature unknown; restored from __doc__\n pass",
"def clear():",
"def clear(self):\n ...",
"def clear(self):\n self.initialize()\n self.device_disconnect()",
"def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None",
"def clear(self) -> None:",
"def __init__(self):\r\n self.clear()",
"def clear(self):\n self.__dict__.clear()",
"def __del__(self):\n self.Clear()",
"def __init__(self):\n self.clear()",
"def clear(self) -> None:\n ...",
"def clear(self):",
"def clear(self):",
"def clear(self):",
"def clear(self):",
"def clear(self):",
"def clear(self):",
"def clear(self):",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass",
"def Clear(self): # real signature unknown; restored from __doc__\n pass"
] |
[
"0.7221925",
"0.70681465",
"0.70423144",
"0.7020949",
"0.6962887",
"0.6962776",
"0.6916138",
"0.6908658",
"0.6898825",
"0.6898378",
"0.689274",
"0.68828136",
"0.6880186",
"0.68772185",
"0.68727833",
"0.68727833",
"0.68727833",
"0.68727833",
"0.68727833",
"0.68727833",
"0.68727833",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343",
"0.6843343"
] |
0.8715687
|
0
|
Creates one task for each ingest chunk present in the build folder. It is required that the info file is already placed in order for this task to run successfully.
|
def create_ingest_task(storage, task_queue):
for filename in storage.list_files(prefix='build/'):
t = IngestTask(
chunk_path=storage.get_path_to_file('build/'+filename),
chunk_encoding='npz',
layer_path=storage.layer_path,
)
task_queue.insert(t)
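
A self-contained sketch of the duck-typed interface the function above relies on, assuming everything lives in one scratch module; the stand-in classes and the IngestTask namedtuple below are illustrative only (the real objects come from the ingest pipeline's storage and task-queue layers).

from collections import namedtuple

# Illustrative stand-in for the task object created per chunk.
IngestTask = namedtuple('IngestTask', ['chunk_path', 'chunk_encoding', 'layer_path'])

class FakeStorage:
    # Mimics only the pieces of the storage interface used above.
    layer_path = 'file:///tmp/layer'

    def list_files(self, prefix=''):
        # Chunk files assumed to already sit under build/ next to the info file.
        return ['0-64_0-64_0-64.npz', '64-128_0-64_0-64.npz']

    def get_path_to_file(self, name):
        return self.layer_path + '/' + name

class FakeTaskQueue:
    def __init__(self):
        self.tasks = []

    def insert(self, task):
        self.tasks.append(task)

storage, task_queue = FakeStorage(), FakeTaskQueue()
create_ingest_task(storage, task_queue)
print('%d ingest tasks queued' % len(task_queue.tasks))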
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_tasks(self, task):",
"def gen_tasks(self):\n self.kw = {\n 'image_srcset_sizes': self.site.config['IMAGE_SRCSET_SIZES'],\n 'image_srcset_format': self.site.config['IMAGE_SRCSET_FORMAT'],\n 'extra_image_extensions': self.site.config['EXTRA_IMAGE_EXTENSIONS'],\n 'max_image_size': self.site.config['MAX_IMAGE_SIZE'],\n 'image_folders': self.site.config['IMAGE_FOLDERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'filters': self.site.config['FILTERS'],\n 'preserve_exif_data': self.site.config['PRESERVE_EXIF_DATA'],\n 'exif_whitelist': self.site.config['EXIF_WHITELIST'],\n 'preserve_icc_profiles': self.site.config['PRESERVE_ICC_PROFILES'],\n }\n\n self.image_ext_list = self.image_ext_list_builtin\n self.image_ext_list.extend(self.site.config.get('EXTRA_IMAGE_EXTENSIONS', []))\n\n yield self.group_task()\n for src in self.kw['image_folders']:\n dst = self.kw['output_folder']\n filters = self.kw['filters']\n real_dst = os.path.join(dst, self.kw['image_folders'][src])\n for task in self.process_tree(src, real_dst):\n task['basename'] = self.name\n task['uptodate'] = [utils.config_changed(self.kw)]\n yield utils.apply_filters(task, filters)",
"def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))",
"def create_task():",
"def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )",
"def task_gen(self):\n pass",
"def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)",
"def job_ingestion(self):\n\n # Map a file type to a DAO\n if self.file_type == SmsFileTypes.SAMPLE_LIST:\n self.file_dao = SmsSampleDao()\n elif self.file_type == SmsFileTypes.N0:\n self.file_dao = SmsN0Dao()\n else:\n self.file_dao = None\n\n if self.file_dao:\n # look up if any rows exist already for the file\n records = self.file_dao.get_from_filepath(self.file_path)\n\n if records:\n logging.warning(f'File already ingested: {self.file_path}')\n return\n\n data_to_ingest = self.read_data_from_cloud_manifest(self.file_path)\n\n self.validate_columns(data_to_ingest[\"fieldnames\"], self.file_dao)\n\n self.write_data_to_manifest_table(data_to_ingest[\"rows\"])",
"def task_generate_job_batch():\n return {\n # force doit to always mark the task\n # as not up-to-date (unless target removed)\n 'uptodate': [False],\n 'file_dep': ['generate_job_batch.py'],\n 'task_dep': ['create_folders'],\n #'targets': ['.running_jobs/list_of_jobs.txt'],\n 'actions': ['python generate_job_batch.py'],\n }",
"def build(self, manager):\n contents = sorted(self.file_src_dest)\n output_files_dir = self.output_files_dir\n all_dest_files = []\n\n for src_file, dest_file in contents:\n all_dest_files += [dest_file]\n rel = dest_file.relative_to(output_files_dir)\n yield self.task(\n name=f\"copy:{rel}\",\n doc=f\"copy {src_file} to {rel}\",\n file_dep=[src_file],\n targets=[dest_file],\n actions=[\n (self.copy_one, [src_file, dest_file]),\n ],\n )\n\n if manager.source_date_epoch is not None:\n yield self.task(\n name=\"timestamp\",\n file_dep=all_dest_files,\n actions=[\n (self.maybe_timestamp, [self.output_files_dir]),\n ],\n )",
"def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in [\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles = [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n 
f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )",
"def process(self, element, bucket, ingestFrom, all_edits):\n if isinstance(element, basestring):\n chunk_name = element\n page_ids = None\n else:\n # If specified on selected pages.\n (chunk_name, data) = element\n if data['datadump'] == []:\n logging.info('USERLOG: chunk %s skipped.', chunk_name)\n return\n page_ids = data['selected_pages']\n logging.info('USERLOG: Running ingestion process on %s.', chunk_name)\n if ingestFrom == 'local':\n input_stream = chunk_name\n else:\n cmd = \"gsutil -m cp {path} {chunk}\".format(filepath=path.join('gs://', bucket, chunk_name), chunk=chunk_name)\n status = os.WEXITSTATUS(os.system(cmd))\n if status != 0:\n raise Exception(\"GSUTIL COPY Error, exited with status {}\".format(status))\n input_stream = chunk_name\n # Running ingestion on the xml file\n last_revision = None\n last_completed = time.time()\n i = 0\n for i, content in enumerate(parse_stream(bz2.BZ2File(chunk_name))):\n if (page_ids is not None) and (content['page_id'] not in page_ids):\n continue\n self.processed_revisions.inc()\n if content['text'] is None:\n content['text'] = \"\"\n if (content[\"comment\"] is not None and \"POV\" in content[\"comment\"]) or all_edits:\n yield json.dumps((last_revision, content))\n last_revision = content\n logging.info('INGESTION_LOG: CHUNK %(chunk): revision %(revid) ingested, time elapsed: %(time).',\n extra={'chunk':chunk_name, 'revid':content['rev_id'], 'time':time.time() - last_completed))\n last_completed = time.time()\n if ingestFrom != 'local': os.system(\"rm %s\" % chunk_name)\n logging.info('USERLOG: Ingestion on file %(chunk) complete! %(cnt) lines emitted',\n extra={'chunk':chunk_name, 'cnt':i})",
"def tasks():",
"def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n node_endpoint = config['host'] + '/node?_format=json'\n\n for row in csv_data:\n row = clean_csv_values(row)\n id_field = row[config['id_field']]\n\n # Add required fields.\n node = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': row['title']}\n ],\n 'status': [\n {'value': config['published']}\n ]\n }\n\n # If a node with an ID that matches the current item's\n # 'parent_id' value has just been created, make the item\n # a child of the node.\n if 'parent_id' in row.keys() and row['parent_id'] in node_ids:\n row['field_member_of'] = node_ids[row['parent_id']]\n\n # Add custom (non-required) CSV fields.\n required_fields = ['file', config['id_field'], 'title']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n if not isinstance(row[custom_field], str):\n continue\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # This field can exist in the CSV to create parent/child\n # relationships and is not a Drupal field.\n if custom_field == 'parent_id':\n continue\n\n # 'langcode' is a core Drupal field, but is not considered a \"base field\".\n if custom_field == 'langcode':\n continue\n\n # Execute field preprocessor scripts, if any are configured. Note that these scripts\n # are applied to the entire value from the CSV field and not split field values,\n # e.g., if a field is multivalued, the preprocesor must split it and then reassemble\n # it back into a string before returning it. Note that preprocessor scripts work only\n # on string data and not on binary data like images, etc. and only on custom fields\n # (so not title).\n if 'preprocessors' in config and len(config['preprocessors']) > 0:\n for field, command in config['preprocessors'].items():\n if field in csv_column_headers:\n output, return_code = preprocess_field_data(config['subdelimiter'], row[field], command)\n if return_code == 0:\n preprocessor_input = copy.deepcopy(row[field])\n row[field] = output.decode().strip()\n logging.info('Preprocess command %s executed, taking \"%s\" as input and returning \"%s\".', command, preprocessor_input, output.decode().strip())\n else:\n message = 'Preprocess command ' + command + ' failed with return code ' + str(return_code)\n logging.error(message)\n sys.exit(message)\n\n # Assemble Drupal field structures for entity reference fields from CSV data. 
For\n # taxonomy terms, target_type is 'taxonomy_term'; for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0],\n 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, 
row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # For non-entity reference and non-typed relation fields (text, integer, boolean etc.).\n else:\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n first_subvalue = subvalues[0]\n first_subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], first_subvalue)\n node[custom_field] = [{'value': first_subvalue}]\n if len(subvalues) 
> 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n node_headers = {'Content-Type': 'application/json'}\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('Node for \"' + row['title'] + '\" (record ' + id_field + ') created at ' + node_uri + '.')\n logging.info(\"Node for %s (record %s) created at %s.\", row['title'], id_field, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, id_field, node_response.text)\n else:\n logging.error(\"Node for CSV record %s not created, HTTP response code was %s.\", id_field, node_response.status_code)\n continue\n\n # Map ID from CSV of newly created node to its node ID so we can use it for linking child nodes, etc.\n if node_response.status_code == 201:\n node_nid = node_uri.rsplit('/', 1)[-1]\n node_ids[id_field] = node_nid\n\n # If there is no media file (and we're not creating paged content), move on to the next CSV row.\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+No media for ' + node_uri + ' created since its \"file\" field in the CSV is empty.')\n logging.warning(\"No media for %s created since its 'file' field in the CSV is empty.\", node_uri)\n continue\n\n # If there is a media file, add it.\n if 'file' in row:\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n if node_response.status_code == 201:\n # If what is identified in the 'file' field is a file, create the media from it.\n if 'file' in row and len(row['file']) != 0 and os.path.isfile(file_path):\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+' + media_type.title() + \" media for \" + row['file'] + \" created.\")\n logging.info(\"%s media for %s created.\", media_type.title(), row['file'])\n\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+ No file specified in CSV for ' + row['title'])\n logging.info(\"No file specified for %s, so no media created.\", id_field)\n\n if config['paged_content_from_directories'] is True:\n # Console output and logging are done in the create_children_from_directory function.\n create_children_from_directory(config, row, node_nid, row['title'])",
"def app_tasks(name, path):\n @task(pre=reset_project.pre, name=\"reset_project\")\n def _reset_project(ctx):\n reset_project(ctx, path)\n\n _reset_project.__doc__ = \"Reset Mynewt project files for {}\".format(name)\n\n @task(pre=install_project.pre, name=\"install_project\")\n def _install_project(ctx):\n install_project(ctx, path)\n\n _install_project.__doc__ = \"Install Mynewt project dependencies for {}\".format(name)\n\n @task(pre=build.pre, name=\"build\")\n def _build(ctx, export_path=None, board=None):\n build(ctx, name, path, export_path, board)\n\n _build.__doc__ = \"Build {} for Pylon\".format(name)\n\n @task(pre=run.pre, name=\"run\")\n def _run(ctx, sn=None, board=None): # pylint: disable=C0103\n run(ctx, name, path, sn, board)\n\n _run.__doc__ = \"Flash and run {} on Pylon\".format(name)\n\n @task(pre=debug.pre, name=\"debug\")\n def _debug(ctx, sn=None, port=None, board=None): # pylint: disable=C0103\n debug(ctx, name, path, sn, port, board)\n\n _debug.__doc__ = \"Debug {} on Pylon\".format(name)\n\n return _install_project, _reset_project, _build, _run, _debug",
"def process(msg, context, region):\n\n job_id = int(msg['ingest_job'])\n chunk_key = msg['chunk_key']\n tile_key = msg['tile_key']\n print(\"Tile key: {}\".format(tile_key))\n\n proj_info = BossIngestProj.fromTileKey(tile_key)\n\n # Set the job id\n proj_info.job_id = msg['ingest_job']\n\n print(\"Data: {}\".format(msg))\n\n # update value in the dynamo table\n tile_index_db = BossTileIndexDB(proj_info.project_name)\n chunk = tile_index_db.getCuboid(chunk_key, job_id)\n if chunk:\n if tile_index_db.cuboidReady(chunk_key, chunk[\"tile_uploaded_map\"]):\n print(\"Chunk already has all its tiles: {}\".format(chunk_key))\n # Go ahead and setup to fire another ingest lambda so this tile\n # entry will be deleted on successful execution of the ingest lambda.\n chunk_ready = True\n else:\n print(\"Updating tile index for chunk_key: {}\".format(chunk_key))\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n else:\n # First tile in the chunk\n print(\"Creating first entry for chunk_key: {}\".format(chunk_key))\n try:\n tile_index_db.createCuboidEntry(chunk_key, job_id)\n except ClientError as err:\n # Under _exceptional_ circumstances, it's possible for another lambda\n # to beat the current instance to creating the initial cuboid entry\n # in the index.\n error_code = err.response['Error'].get('Code', 'Unknown')\n if error_code == 'ConditionalCheckFailedException':\n print('Chunk key entry already created - proceeding.')\n else:\n raise\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n\n # ingest the chunk if we have all the tiles\n if chunk_ready:\n print(\"CHUNK READY SENDING MESSAGE: {}\".format(chunk_key))\n # insert a new job in the insert queue if we have all the tiles\n ingest_queue = IngestQueue(proj_info)\n ingest_queue.sendMessage(json.dumps(msg))\n\n # Invoke Ingest lambda function\n names = AWSNames.from_lambda(context.function_name)\n lambda_client = boto3.client('lambda', region_name=region)\n lambda_client.invoke(\n FunctionName=names.tile_ingest.lambda_,\n InvocationType='Event',\n Payload=json.dumps(msg).encode())\n else:\n print(\"Chunk not ready for ingest yet: {}\".format(chunk_key))\n\n print(\"DONE!\")",
"def _issue_first_task(self):\n\n task_dict = {\n \"task\": \"prepare_iteration\",\n \"model\": str(self.model_path),\n \"iteration_number\": self.iteration_number,\n \"iteration_name\": f\"model_{self.iteration_number:05d}\",\n \"finished\": False,\n }\n\n with open(self.task_path, \"w+\") as fh:\n toml.dump(task_dict, fh)",
"def task():",
"def create_additional_tasks(testcase):\n # No need to create progression task. It is automatically created by the cron\n # handler.\n task_creation.create_impact_task_if_needed(testcase)\n task_creation.create_regression_task_if_needed(testcase)\n task_creation.create_symbolize_task_if_needed(testcase)\n task_creation.create_variant_tasks_if_needed(testcase)",
"def run(self):\n with hp.a_temp_file() as fle:\n fle.write(dedent(\"\"\"\n ---\n environments: { dev: {account_id: \"123\"} }\n stacks: { app: {} }\n \"\"\").encode('utf-8'))\n fle.seek(0)\n collector = Collector()\n collector.prepare(fle.name, {'bespin': {'extra': \"\"}, \"command\": None, \"bash\": None})\n\n section = nodes.section()\n section['ids'].append(\"available-tasks\")\n\n title = nodes.title()\n title += nodes.Text(\"Default tasks\")\n section += title\n\n for name, task in sorted(collector.configuration['task_finder'].tasks.items(), key=lambda x: len(x[0])):\n\n lines = [name] + [\" {0}\".format(line.strip()) for line in task.description.split('\\n')]\n viewlist = ViewList()\n for line in lines:\n viewlist.append(line, name)\n self.state.nested_parse(viewlist, self.content_offset, section)\n\n return [section]",
"def new_tasks(self, extra):\n tasks = []\n\n for parameter in self._enumerate_csv(self.params.csv_input_file):\n parameter_str = '.'.join(str(x) for x in parameter)\n jobname = \"gpfi-%s\" % parameter_str\n\n extra_args = extra.copy()\n\n extra_args['jobname'] = jobname\n \n extra_args['output_dir'] = self.params.output\n extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', jobname)\n\n self.log.debug(\"Creating Application for parameter : %s\" %\n (parameter_str))\n\n tasks.append(GpfiApplication(\n parameter,\n self.param.model,\n **extra_args))\n\n return tasks",
"def test_case_3(self):\n config_generator = Genesis()\n\n data = open_json_file('r1.json')\n\n task_1 = Task.new(data=data, name=\"R1_BASE_CONFIG\")\n task_2 = Task.new(data=data, name=\"R1_BASE_CONFIG_2\")\n\n config_generator.add_task(task_1)\n config_generator.add_task(task_2)\n\n results = config_generator.generate()\n self.assertIsInstance(results, dict)\n\n for task in config_generator.tasks:\n self.assertTrue(task.is_complete)\n self.assertIsInstance(task.rendered_data, str)",
"def prepare_submit(self, mapping):\n self.dag_path = self.mk_path('%(mex_id)s.dag', mapping)\n self.create_file(self.dag_path,\n self.template['condor.dag_template'], mapping)\n\n self.conf_path = self.mk_path('%(mex_id)s.dag.config', mapping)\n self.create_file(self.conf_path,\n self.template['condor.dag_config_template'], mapping)\n\n self.submit_path = self.mk_path('%(mex_id)s.cmd', mapping)\n self.create_file(self.submit_path,\n self.template['condor.submit_template'], mapping)",
"def insert_tasks(note, exer_tasks):\n for i in range(len(exer_tasks)):\n if 'graphml' in exer_tasks[i]:\n insert_single_graph_task(note,exer_tasks[i],i)\n else:\n insert_single_task(note,exer_tasks[i],i)\n return",
"def new_tasks(self, extra):\n\n tasks = []\n\n try:\n fd = open(self.params.command_file)\n \n self.result_dir = os.path.dirname(self.params.output)\n \n for line in fd:\n command = line.strip()\n\n if not command:\n # ignore black lines\n continue\n\n cmd_args = _parse_command(command)\n \n # setting jobname\n jobname = \"gc_gps-%s%s%s%s%s\" % (cmd_args['pos'],\n cmd_args['realizations'],\n cmd_args['snr'],\n cmd_args['mast.h'],\n cmd_args['sd.mast.o'])\n\n extra_args = extra.copy()\n extra_args['jobname'] = jobname\n # FIXME: ignore SessionBasedScript feature of customizing \n # output folder\n extra_args['output_dir'] = self.params.output\n\n extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', os.path.join('.computation',jobname))\n\n self.log.debug(\"Creating Task for command: %s\" % command)\n\n tasks.append(GcgpsTask(\n command,\n self.params.R_source_folder,\n self.result_dir,\n self.params.input_dir,\n **extra_args))\n\n except IOError, ioe:\n self.log.error(\"Error while reading command file \" +\n \"%s.\" % self.params.command_file +\n \"Message: %s\" % ioe.message)\n except Exception, ex:\n self.log.error(\"Unexpected error. Error type: %s, Message: %s\" % (type(ex),str(ex)))\n\n finally:\n fd.close()\n\n return tasks",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n 
self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs",
"def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)",
"def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()",
"def prepare_run(self, **kwargs):\n super().prepare_run(**kwargs)\n with open(\n os.path.join(self.rally_dir, 'rally_jobs.yaml'),\n 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")",
"def upload_tasks(self, batch_id = None, input_file='', file_type = 'Excel'):\n url = self._base_url + urlConfig.URLS['Project'] + '/v2/' + self._project_id + \"/batch/\" + batch_id + \"/tasks/import?fileType=\" + file_type\n \n \n try: \n if input_file.endswith('.gz'):\n fileContent = open(input_file, 'rb').read()\n encoded = base64.b64encode(fileContent)\n else:\n fileContent = open(input_file, 'rb').read()\n with gzip.open('file.txt.gz', 'wb') as f:\n f.write(fileContent)\n fileContent = open('file.txt.gz', 'rb').read()\n encoded = base64.b64encode(fileContent)\n os.remove('file.txt.gz')\n response = requests.post(url, encoded, headers = {\n 'Content-Type': 'text/plain',\n 'Authorization': 'Bearer ' + self._get_token()})\n logging.debug(response.json())\n parsed = response.json()\n job_id = parsed['job_id']\n logging.debug('job id = %s', job_id)\n return job_id\n \n except Exception as e: print(e)"
] |
[
"0.63344",
"0.62111056",
"0.61512274",
"0.6071256",
"0.59026337",
"0.58242923",
"0.58159286",
"0.5714517",
"0.55866283",
"0.558229",
"0.5534695",
"0.55086124",
"0.55020404",
"0.5470382",
"0.53798485",
"0.53627956",
"0.534799",
"0.53423345",
"0.53301287",
"0.5326683",
"0.5310019",
"0.526622",
"0.52615154",
"0.5258788",
"0.5217613",
"0.52150375",
"0.51903784",
"0.51858085",
"0.5181038",
"0.51667356"
] |
0.7282232
|
0
|
You can use this to fix black spots left behind when downsample tasks fail by specifying a point inside each black spot.
|
def create_fixup_downsample_tasks(task_queue, layer_path, points, shape=Vec(2048, 2048, 64), mip=0, axis='z'):
    vol = CloudVolume(layer_path, mip)
    offsets = compute_fixup_offsets(vol, points, shape)
    for offset in tqdm(offsets, desc="Inserting Corrective Downsample Tasks"):
        task = DownsampleTask(
            layer_path=layer_path,
            mip=mip,
            shape=shape,
            offset=offset,
            axis=axis,
        )
        task_queue.insert(task)
    task_queue.wait('Uploading')
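A minimal usage sketch, assuming the igneous-style DownsampleTask/CloudVolume setup above and the older TaskQueue(queue_name=...) constructor from python-task-queue; the bucket path, queue name, and point coordinates are placeholders, not values from the source:

# Hypothetical usage sketch -- layer path, queue name, and coordinates are placeholders.
from taskqueue import TaskQueue
from cloudvolume.lib import Vec

# One (x, y, z) voxel coordinate inside each black spot, in voxel space at the chosen mip.
points = [
    (20480, 14336, 1024),
    (8192, 30720, 2048),
]

with TaskQueue(queue_name='example-downsample-queue') as tq:
    create_fixup_downsample_tasks(
        tq,
        'gs://example-bucket/example-dataset/image',
        points,
        shape=Vec(2048, 2048, 64),
        mip=0,
        axis='z',
    )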
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def CleanBadPixels(spectraUp,spectraDown):\n \n Clean_Up= []\n Clean_Do = []\n Clean_Av = []\n eps=25. # this is the minumum background Please check\n NBSPEC=len(spectraUp)\n for index in np.arange(0,NBSPEC):\n s_up=spectraUp[index]\n s_do=spectraDown[index]\n \n index_up=np.where(s_up<eps)\n index_do=np.where(s_do<eps)\n \n s_up[index_up]=s_do[index_up]\n s_do[index_do]=s_up[index_do]\n s_av=(s_up+s_do)/2.\n \n Clean_Up.append(s_up)\n Clean_Do.append(s_do)\n Clean_Av.append(s_av)\n \n return Clean_Up, Clean_Do,Clean_Av",
"def downsample_sam(self, factor):",
"def stop_procrastinating(self):\n for layer, l in sorted(self.postponed.items()):\n for fun, args, rgb in l:\n self.set_source_rgb(*rgb)\n fun(*args, procrastinate=0)\n self.postponed = {}",
"def test_BBBP_erroneous_data_removed_tailored(self):\n cp = Plotter.from_smiles(self.data_BBBP_erroneous_smiles[\"smiles\"], target=self.data_BBBP_erroneous_smiles[\"target\"], target_type=\"C\", sim_type=\"tailored\")\n self.assertEqual(len(cp._Plotter__df_descriptors.index), len(self.data_BBBP_erroneous_smiles.index) - (len(self.list_BBBP_erroneous_smiles) + len(self.list_BBBP_erroneous_descriptors)))",
"def downsampleShape(self, numDesiredPoints):\n\n if len(self.x) > 2:\n t_current_x = np.linspace(0, 1, len(self.x))\n t_current_y = np.linspace(0, 1, len(self.y))\n t_desired_x = np.linspace(0, 1, numDesiredPoints)\n t_desired_y = np.linspace(0, 1, numDesiredPoints)\n f = interpolate.interp1d(t_current_x, self.x, kind='linear')\n self.x = f(t_desired_x).tolist()\n f = interpolate.interp1d(t_current_y, self.y, kind='linear')\n self.y = f(t_desired_y).tolist()\n\n self.len = numDesiredPoints",
"def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)",
"def grass_drass():",
"def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)",
"def downsample(self, factor):\n self.img = self.img[::factor, ::factor, :] if self.fast else self.img\n self.comb_structure_mask = self.comb_structure_mask[::factor, ::factor]\n self.unknown_mask = self.unknown_mask[::factor, ::factor]",
"def stitch(tiles, dest):\n pass",
"def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)",
"def sgr_segmentation(img,seed_values,error_threshold=5):\n\n img_copy = np.copy(img)\n\n m,n = img_copy.shape\n\n seed_points = get_seed_points(img_copy,seed_values)\n\n vis = [[False for j in range(n)] for i in range(m)]\n\n for i,j in seed_points:\n if vis[i][j] == False:\n dfs_sgr_segmentation(i,j,img_copy,vis,error_threshold,m,n)\n\n for i,j in it.product(range(m),range(n)):\n if img_copy[i,j] not in seed_values:\n img_copy[i,j] = 0\n\n return img_copy",
"def approching_blackhole():\n blackhole = BlackHole()\n Rs = 8.0\n D_list = np.round(10**np.linspace(np.log10(50), np.log10(100000), 30))\n blackhole.open(blackhole.img_name, size=2000)\n\n for D in D_list:\n blackhole.compute(Rs, D)\n blackhole.img_save()",
"def testDipoleTask(self):\n sources = self.runDetection()\n\n offsets = self.params.offsets\n for i, r1 in enumerate(sources):\n result = r1.extract(\"ip_diffim_DipoleFit*\")\n self.assertClose((result['ip_diffim_DipoleFit_pos_flux'] +\n abs(result['ip_diffim_DipoleFit_neg_flux']))/2.,\n self.params.flux[i], rtol=0.02)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_x'],\n self.params.xc[i] + offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_y'],\n self.params.yc[i] + offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_x'],\n self.params.xc[i] - offsets[i], rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_y'],\n self.params.yc[i] - offsets[i], rtol=0.01)\n # Note this is dependent on the noise (variance) being realistic in the image.\n # otherwise it throws off the chi2 estimate, which is used for classification:\n self.assertTrue(result['ip_diffim_DipoleFit_flag_classification'])\n\n # compare to the original ip_diffim_PsfDipoleFlux measurements\n result2 = r1.extract(\"ip_diffim_PsfDipoleFlux*\")\n self.assertClose((result['ip_diffim_DipoleFit_pos_flux'] +\n abs(result['ip_diffim_DipoleFit_neg_flux']))/2.,\n (result2['ip_diffim_PsfDipoleFlux_pos_flux'] +\n abs(result2['ip_diffim_PsfDipoleFlux_neg_flux']))/2.,\n rtol=0.02)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_x'],\n result2['ip_diffim_PsfDipoleFlux_pos_centroid_x'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_pos_centroid_y'],\n result2['ip_diffim_PsfDipoleFlux_pos_centroid_y'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_x'],\n result2['ip_diffim_PsfDipoleFlux_neg_centroid_x'],\n rtol=0.01)\n self.assertClose(result['ip_diffim_DipoleFit_neg_centroid_y'],\n result2['ip_diffim_PsfDipoleFlux_neg_centroid_y'],\n rtol=0.01)\n\n if self.params.display:\n DipolePlotUtils.displayCutouts(r1, self.testImage.diffim,\n self.testImage.posImage, self.testImage.negImage)\n if self.params.display:\n DipolePlotUtils.plt.show()\n\n return result",
"def blackcover(model, X, y, width, height, xskip, yskip):\n\t#wideth:44 , height:22, xship:22. yship:22\n max_loss = torch.zeros(y.shape[0]).to(y.device)\n max_delta = torch.ones_like(X).to(y.device)\n xtimes = 224//xskip\n ytimes = 224//yskip\n\n for i in range(xtimes):\n for j in range(ytimes):\n\n blackcover = np.ones([224,224,3]).astype(np.float32)*255\n blackcover[yskip*j:(yskip*j+height),xskip*i:(xskip*i+width),:] = 0 \n blackcover = transforms.ToTensor()(blackcover).to(y.device)\n\n #print(blackcover[:,1,1])\n # out = torchvision.utils.make_grid(blackcover)\n # imshow(out)\n \n\n all_loss = nn.CrossEntropyLoss(reduction='none')(model( X*blackcover), y )\n if(all_loss>=max_loss):\n max_delta = blackcover.detach()\n max_loss = torch.max(max_loss, all_loss)\n \n return max_delta",
"def setup_mask(self, d25scale): \n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n \n # see if this is a bad shot\n #print(\"Bad shot from \", self.conf.badshot)\n badshot = loadtxt(self.conf.badshot, dtype=int)\n badtpshots = loadtxt(self.conf.lowtpshots, dtype=int)\n if (self.shotid in badshot) or (self.shotid in badtpshots):\n logger.warn(\"Shot is in bad. Making mask zero everywhere\")\n self.badshot = True\n else:\n self.badshot = False\n \n # set up bad amps\n logger.info(\"Bad amps from {:s}\".format(self.conf.badamp))\n self.bad_amps = Table.read(self.conf.badamp)\n sel_shot = (self.bad_amps[\"shotid\"] == self.shotid)\n self.bad_amps = self.bad_amps[sel_shot]\n \n # set up galaxy mask\n logger.info(\"Galaxy mask from {:s}\".format(self.conf.rc3cat))\n galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')\n gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')\n shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,\n unit=\"deg\")\n sel_reg = where(shot_coords.separation(gal_coords) < 1.*u.deg)[0]\n\n self.gal_regions = []\n if len(sel_reg) > 0:\n for idx in sel_reg:\n self.gal_regions.append(create_gal_ellipse(galaxy_cat, \n row_index=idx, \n d25scale=d25scale))\n \n # set up meteor mask\n # check if there are any meteors in the shot:\n logger.info(\"Meteors from {:s}\".format(self.conf.meteor))\n self.met_tab = Table.read(self.conf.meteor, format=\"ascii\")\n self.met_tab = self.met_tab[self.shotid == self.met_tab[\"shotid\"]]",
"def test_without_target(self, X, y):\n try:\n resize_batch(X, y, 1.0, 'crop', resize_targets=False)\n except:\n pytest.fail('apply_progressive_resizing failed with y == None')",
"def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp",
"def discard_tile(self):\n raise NotImplemented()",
"def set_black(self, x, y):\n self.pieces[x + (y * self.width)].set_black()",
"def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, segments_deblend",
"def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out 
= out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());",
"def cleanBadPix(redux_science, bad_pixel_map, method = 'median', replacement_box = 5, replace_constant = -99):\n #add negative pixels to the bad pixel map\n bad_pixel_map = np.logical_or(bad_pixel_map, redux_science <= 0)\n # im = np.copy(redux_science)\n # im[np.where(bad_pixel_map)[1]] = 0.\n if method == 'median':\n med_fil = median_filter(redux_science, size = replacement_box)\n\n cleaned = redux_science*~bad_pixel_map + med_fil*bad_pixel_map\n\n #elif method == 'interpolate':\n\n # print('so clean')\n\n return cleaned",
"def on_epoch_end(self):\n if self.suffle == True:\n self.image_filenames, self.mask_names = shuffle(self.image_filenames, self.mask_names)",
"def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)",
"def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])",
"def abrupt_change_locations(DataCube,thresh=20):\n # Do a 3 pixels median filtering in the data to reduce the read noise in time series and preserve steps\n if np.ma.isMaskedArray(DataCube) :\n MFDataCube = filters.median_filter(DataCube.data,size=(3,1,1),mode='nearest')\n else:\n MFDataCube = filters.median_filter(DataCube,size=(3,1,1),mode='nearest')\n\n convimg = MFDataCube[:-3,:,:] -3*MFDataCube[1:-2,:,:] +3*MFDataCube[2:-1,:,:] -MFDataCube[3:,:,:] #Convolution of image with [1,-3,3,-1] along time axis\n \n medianconv = np.median(convimg,axis=0) #Wrongly nameing the variable now itself to conserve memory\n stdconv = np.median(np.abs(convimg-medianconv),axis=0) * 1.4826 # Convert MAD to std # MAD for robustnus\n #We should remove all the points where number of data points were less than 5\n stdconv[np.where(np.ma.count(DataCube,axis=0) < 5)] = 99999\n # We should also remove any spuriously small stdeviation, say those lees than median std deviation \n MedianNoise = np.median(stdconv)\n stdconv[np.where(stdconv < MedianNoise)] = MedianNoise #This will prevent spuriously small std dev estimates\n #Find the edges of ramp jumps.\n if np.ma.isMaskedArray(DataCube) :\n (T,I,J) = np.ma.where(np.ma.array(convimg-medianconv,mask=np.ma.getmaskarray(DataCube[3:,:,:])) > thresh*stdconv) \n else: \n (T,I,J) = np.ma.where(convimg-medianconv > thresh*stdconv)\n T=T+2 #Converting to the original time coordinate of DataCube by correcting for the shifts. This T is the first pixel after CR hit\n return T,I,J",
"def test_bad_node_select(self, dim):\r\n graph = nx.barbell_graph(dim, 0)\r\n s = list(range(2 * dim))\r\n with pytest.raises(ValueError, match=\"Node selection method not recognized\"):\r\n clique.shrink(s, graph, node_select=\"\")",
"def _filter_out_bad_segments(img1, seg1, img2, seg2):\n minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))\n if minval < 0.5:\n warnings.warn(\"filtering bad segment\")\n return False\n else:\n return True",
"def test_healpix_to_point_source_cuts(healpix_disk_new):\n skyobj = healpix_disk_new\n skyobj.healpix_to_point()\n skyobj.select(max_brightness=0.9 * skyobj.stokes[0].max())"
] |
[
"0.5587213",
"0.5520415",
"0.54539263",
"0.52487373",
"0.51884097",
"0.51857966",
"0.5172123",
"0.5144501",
"0.51434153",
"0.51297325",
"0.50784206",
"0.5076486",
"0.5027425",
"0.50130695",
"0.5011849",
"0.4993209",
"0.4949142",
"0.4925785",
"0.49238706",
"0.49124724",
"0.49044317",
"0.49040064",
"0.48997873",
"0.48886743",
"0.48847008",
"0.48792002",
"0.48720756",
"0.486436",
"0.4860315",
"0.48459253"
] |
0.61001945
|
0
|
Transfer an Eyewire consensus into neuroglancer. This first requires importing the raw segmentation via a hypersquare ingest task. However, this can probably be streamlined at some point. The volume map file should be JSON encoded and
|
def create_hypersquare_consensus_tasks(task_queue, src_path, dest_path, volume_map_file, consensus_map_path):
    with open(volume_map_file, 'r') as f:
        volume_map = json.loads(f.read())
    vol = CloudVolume(dest_path)
    for boundstr, volume_id in tqdm(volume_map.items(), desc="Inserting HyperSquare Consensus Remap Tasks"):
        bbox = Bbox.from_filename(boundstr)
        bbox.minpt = Vec.clamp(bbox.minpt, vol.bounds.minpt, vol.bounds.maxpt)
        bbox.maxpt = Vec.clamp(bbox.maxpt, vol.bounds.minpt, vol.bounds.maxpt)
        task = HyperSquareConsensusTask(
            src_path=src_path,
            dest_path=dest_path,
            ew_volume_id=int(volume_id),
            consensus_map_path=consensus_map_path,
            shape=bbox.size3(),
            offset=bbox.minpt.clone(),
        )
        task_queue.insert(task)
    task_queue.wait()
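A usage sketch under the same caveats as above: the task queue construction, bucket paths, and file names are placeholders, and the volume-map format is inferred from the loop above (keys are bounding-box strings accepted by Bbox.from_filename, values are Eyewire volume IDs):

# Hypothetical usage sketch -- paths, queue name, and IDs are placeholders.
from taskqueue import TaskQueue

# Inferred from the code above, volume_map.json looks roughly like:
#   { "0-2048_0-2048_0-64": 12345, ... }

with TaskQueue(queue_name='example-consensus-queue') as tq:
    create_hypersquare_consensus_tasks(
        tq,
        src_path='gs://example-bucket/hypersquare-segmentation',
        dest_path='gs://example-bucket/consensus-segmentation',
        volume_map_file='volume_map.json',
        consensus_map_path='gs://example-bucket/consensus_map.json',
    )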
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def main():\n\n coastfile = \"coast.wkb\"\n\n # Choose between c, l, i, h, f resolutions\n GSHHSres = 'i'\n\n # Define regional domain\n lonmin, lonmax, latmin, latmax = -21, 18, 47, 67\n\n coast2wkb(**locals())",
"def preprocess(self,\n HUC8, \n state, \n start, \n end,\n drainmax = 400, \n extra_outlets = None,\n overwrite = False, \n verbose = True, \n vverbose = False, \n parallel = True, \n extract = True, \n delineate = True,\n landuse = True, \n landstats = True, \n build = True, \n climate = True, \n gagedata = True,\n subbasinplots = False, \n watershedplots = True, \n landplots = True,\n landpercents = False, \n flowplots = True, \n metstatplots = True,\n metgageplots = True,\n ):\n\n # check the network is mounted on Unix-like systems\n\n if os.name != 'nt':\n\n if not os.path.ismount(self.network):\n print('\\nerror: network ' +\n '{} does not seem to be mounted\\n'.format(self.network))\n raise\n\n # keep track of how long it takes\n\n go = time.time()\n\n # if the destination folder does not exist, make it\n\n if not os.path.isdir(self.output): os.mkdir(self.output)\n\n # if the destination folder for the HUC8 does not exist, make it\n\n its = self.output, HUC8\n output = '{}/{}'.format(*its)\n if not os.path.isdir(output): os.mkdir(output)\n\n # make a subdirectory for hydrography data\n\n self.hydrography = '{}/{}/hydrography'.format(*its)\n if not os.path.isdir(self.hydrography): os.mkdir(self.hydrography)\n\n # make a directory for HSPF calculations\n\n hspfdirectory = '{}/{}/hspf'.format(*its)\n if not os.path.isdir(hspfdirectory): os.mkdir(hspfdirectory)\n\n # make a list of all the years for the CDL extraction\n\n years = [start.year]\n \n t = start\n while t < end:\n if t.year not in years: years.append(t.year)\n t += datetime.timedelta(days = 1)\n\n # extract the data for the HUC8 from the sources\n\n if extract: self.extract(HUC8, start, end)\n\n # delineate the subbasins and the hydrography data\n\n if delineate: self.delineate(HUC8)\n\n # download and extract land use data\n\n if landuse: self.extract_CDL(HUC8, state, years)\n\n # build the watershed object\n\n if build: self.build(HUC8, start, end, years)\n\n # download and extract the climate data\n\n if climate: self.climate(HUC8, start, end)\n\n if verbose: \n\n print('completed preprocessing watershed in ' +\n '{:.1f} seconds\\n'.format((time.time() - go)))",
"def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)",
"def convert_ecoinvent_2_301():\n ws = get_sheet(\n dirpath / \"lci\" / \"ecoinvent 2-3.01.xlsx\", \"correspondence sheet_corrected\"\n )\n data = [\n [ws.cell(row=row + 1, column=col + 1).value for col in range(17)]\n for row in range(1, ws.max_row)\n ]\n data = {\n \"fields\": [\"name\", \"location\"],\n \"data\": [\n (\n {\"name\": line[0]},\n {\n \"location\": line[2],\n \"name\": line[3],\n \"reference product\": line[1],\n \"system model\": line[4],\n },\n )\n for line in data\n ],\n }\n write_json_file(data, \"simapro-ecoinvent31\")",
"def prepare_and_save(path):\n \n raw, timestamp = ur.MNE_Read_EDF(path)\n \n #Use the time columns to create MNE events structure\n events_log, event_id = clean_log(path)\n event_sample_indexes = ur.parse_events(events_log, timestamp)\n events = ur.events_for_MNE(event_sample_indexes, event_id)\n \n #Add response correct/incorrect to events\n new_events, new_event_id = expand_events(path, events, event_id)\n #Crop the data to include only the time between start and stop of the experiment - many artifacts outside this interval \n raw_cropped = raw.copy().crop(tmin = events[0,0]/raw.info['sfreq'], tmax = events[-1,0]/raw.info['sfreq'])\n #Since the raw was cropped to the time of the first event its' new time is now 0. All following events are shifted.\n new_events[:,0] = new_events[:,0] - new_events[0,0]\n \n #Delete bad channels, ears and visually identified channels\n ears = [ch for ch in raw_cropped.ch_names if 'A' in ch]\n raw_cropped = raw_cropped.drop_channels(ears)\n \n subject_bads = {'Adrianna': ['T4'], 'BartekB' : ['Pz'], 'JeremiaszW' : [], 'KonradW' : ['T3'], 'Lucja' : ['T4', 'F8'], 'MaciekG':[], 'MariuszZ' : [], 'OlaS' :['P4'], 'Patrycja' :[]}\n bads = subject_bads[path.split('\\\\')[-3]]\n if len(bads) != 0:\n raw_cropped = raw_cropped.drop_channels(bads)\n \n #Apply average re-reference\n raw_cropped.save('raw_cropped/' + path.split('\\\\')[-3] +'_raw_cropped.fif', overwrite = True)\n return raw_cropped, new_events, new_event_id",
"def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--snapshot_path', '-s', type=str, required=True, default='', help='Path to model snapshot')\n parser.add_argument('--output_dir', '-o', type=str, required=True, default='', help='Path to output directory')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.snapshot_path + '.index')\n\n if not exists(args.output_dir):\n makedirs(args.output_dir)\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n\n converted_snapshot_path = join(args.output_dir, CKPT_FILE_NAME)\n task_monitor.eliminate_train_ops(converted_snapshot_path)\n\n converted_model_path = '{}-{}'.format(converted_snapshot_path,\n int(basename(args.snapshot_path).split('-')[-1]))\n task_monitor.save_model_graph(converted_model_path, args.output_dir)\n\n task_monitor.freeze_model_graph(converted_model_path,\n join(args.output_dir, PB_FILE_NAME),\n join(args.output_dir, FROZEN_FILE_NAME))",
"def upload_model_data(_slices):\n # new directory path\n _dir = os.getcwd() + '/database/Model-Data/128'\n\n \"\"\" load MCGILL data \"\"\"\n # x=data, y=label, c=clinical, z=contour\n (train_x, train_y, train_c, train_z), (test_x, test_y, test_c, test_z) = dp.load_data_MCGILL(_slices)\n # check data shape\n print(\"Train shape:\", np.array(train_x).shape)\n print(\"Test shape:\", np.array(test_x).shape)\n print(\"Label shapes:\", np.array(train_y).shape, np.array(test_y).shape)\n # create new directory\n path = _dir + '/MCGILL'\n Path(path).mkdir(parents=True, exist_ok=True)\n os.chdir(path)\n # write data to directory\n write_file(train_x, train_y, train_c, train_z, 'train')\n write_file(test_x, test_y, test_c, test_z, 'test')\n\n \"\"\" load MAASTRO data \"\"\"\n # x=data, y=label, c=clinical, z=contour\n (train_x, train_y, train_c, train_z), (test_x, test_y, test_c, test_z) = dp.load_data_MAASTRO(_slices)\n # check data shape\n print(\"Train shape:\", np.array(train_x).shape)\n print(\"Test shape:\", np.array(test_x).shape)\n print(\"Label shapes:\", np.array(train_y).shape, np.array(test_y).shape)\n # create new directory\n path = _dir + '/MAASTRO'\n Path(path).mkdir(parents=True, exist_ok=True)\n os.chdir(path)\n # write data to directory\n write_file(train_x, train_y, train_c, train_z, 'train')\n write_file(test_x, test_y, test_c, test_z, 'test')",
"def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)",
"def main():\r\n args = get_command_line_args()\r\n settings = read_settings(args)\r\n \r\n census_api_key = settings['census_api_key']\r\n state = settings['state']\r\n district = settings['district']\r\n leg_body = settings['leg_body']\r\n census_year = settings['census_year']\r\n election_year = settings['election_year']\r\n voting_precincts_file = settings['voting_precincts']\r\n voting_results_file = settings['voting_results']\r\n \r\n find_blockgroups_in_district(\r\n state=state,\r\n district=district,\r\n leg_body=leg_body,\r\n year=census_year\r\n )\r\n\r\n categories, district_data = make_district_data(\r\n api=census_api_key,\r\n state=state,\r\n district=district,\r\n leg_body=leg_body,\r\n year=census_year\r\n )\r\n\r\n # Estimate voting precinct data based on block group data\r\n district_data = make_voting_precinct_data(\r\n district_data=district_data, \r\n categories=categories,\r\n state=state,\r\n district=district,\r\n leg_body=leg_body,\r\n year=census_year,\r\n voting_precincts_file=voting_precincts_file\r\n )\r\n\r\n categories, district_data = make_voting_results_data(\r\n categories=categories, \r\n district_data=district_data, \r\n state=state, \r\n district=district, \r\n leg_body=leg_body, \r\n election_year=election_year,\r\n census_year=census_year,\r\n voting_precincts_file=voting_precincts_file, \r\n voting_results_file=voting_results_file\r\n )\r\n\r\n to_json(district_data, \"static/data/district-data.json\")\r\n to_json(categories, \"static/data/categories.json\")",
"def convert_semeval_with_extraction(detail=True):\n\n # Parameters\n p = {\n \"data_sets\": [\"../data/semeval2015_task15/train/Microcheck/\", \"../data/semeval2015_task15/test/Microcheck/\"],\n \"output_dir\": \"../data/semeval_mic_train_and_test_with_extraction\",\n \"relations\": [\"subj\", \"obj\", \"iobj\", \"advprep\", \"acomp\", \"scomp\"],\n }\n # if detail:\n # print_params(p)\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n for data_set_path in p[\"data_sets\"]:\n files = os.listdir(\"%s/task1/\" % data_set_path)\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n verb_name, ext = os.path.splitext(file_name)\n file_path = \"%s/task1/%s\" % (data_set_path, file_name)\n # Read cluster\n file_cluster_path = \"%s/task2/%s.clust\" % (data_set_path, verb_name)\n fh = open(file_cluster_path, \"r\")\n verb_id2cluster_id = {}\n for line in fh:\n line = line.strip()\n if line == \"\":\n continue\n verb_id, cluster_id = line.split(\"\\t\")\n verb_id2cluster_id[verb_id] = cluster_id\n fh.close()\n\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], verb_name)\n fh_out = open(out_file_path, \"w\")\n fh = open(file_path, \"r\", encoding = \"ISO-8859-1\")\n sent = \"\"\n cluster_id = -1\n for line in fh:\n line = line.strip()\n if line == \"\":\n fh_out.write(\"%s\\t%s\\n\" % (cluster_id, sent))\n sent = \"\"\n continue\n tokens = line.split(\"\\t\")\n if len(tokens) == 4:\n if tokens[2] == \"v\":\n sent += \"\\t%s\\t\" % tokens[1]\n cluster_id = verb_id2cluster_id[tokens[0]]\n else:\n sent += tokens[1] + \" \"\n\n fh.close()\n fh_out.close()",
"def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)",
"def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])",
"def new_consensus(self, consensus):\r\n pass",
"def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()",
"def main():\n parser = cmdLineParse()\n inps = parser.parse_args()\n gf = asf.load_inventory(inps.inventory)\n\n if inps.template:\n print(f\"Reading from template file: {inps.template}...\")\n inputDict = dice.read_yaml_template(inps.template)\n else:\n inputDict = {\n \"topsinsar\": {\n \"sensorname\": \"SENTINEL1\",\n \"reference\": {\"safe\": \"\"},\n \"secondary\": {\"safe\": \"\"},\n }\n }\n\n intdir = \"int-{0}-{1}\".format(inps.reference, inps.secondary)\n if not os.path.isdir(intdir):\n os.mkdir(intdir)\n os.chdir(intdir)\n\n reference_urls = asf.get_slc_urls(gf, inps.reference, inps.path)\n secondary_urls = asf.get_slc_urls(gf, inps.secondary, inps.path)\n downloadList = reference_urls + secondary_urls\n inps.reference_scenes = [os.path.basename(x) for x in reference_urls]\n inps.secondary_scenes = [os.path.basename(x) for x in secondary_urls]\n\n if inps.poeorb:\n try:\n frame = os.path.basename(inps.reference_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n frame = os.path.basename(inps.secondary_scenes[0])\n downloadList.append(asf.get_orbit_url(frame))\n except Exception as e:\n print(\"Trouble downloading POEORB... maybe scene is too recent?\")\n print(\"Falling back to using header orbits\")\n print(e)\n inps.poeorb = False\n pass\n\n # Update input dictionary with argparse inputs\n inputDict[\"topsinsar\"][\"reference\"][\"safe\"] = inps.reference_scenes\n inputDict[\"topsinsar\"][\"reference\"][\"output directory\"] = \"referencedir\"\n inputDict[\"topsinsar\"][\"secondary\"][\"safe\"] = inps.secondary_scenes\n inputDict[\"topsinsar\"][\"secondary\"][\"output directory\"] = \"secondarydir\"\n # Optional inputs\n # swaths, poeorb, dem, roi, gbox, alooks, rlooks, filtstrength\n if inps.swaths:\n inputDict[\"topsinsar\"][\"swaths\"] = inps.swaths\n if inps.dem:\n inputDict[\"topsinsar\"][\"demfilename\"] = inps.dem\n if inps.roi:\n inputDict[\"topsinsar\"][\"regionofinterest\"] = inps.roi\n if inps.gbox:\n inputDict[\"topsinsar\"][\"geocodeboundingbox\"] = inps.gbox\n if inps.filtstrength:\n inputDict[\"topsinsar\"][\"filterstrength\"] = inps.filtstrength\n if inps.alooks:\n inputDict[\"topsinsar\"][\"azimuthlooks\"] = inps.alooks\n if inps.rlooks:\n inputDict[\"topsinsar\"][\"rangelooks\"] = inps.rlooks\n print(inputDict)\n xml = dice.dict2xml(inputDict)\n dice.write_xml(xml)\n # Create a download file\n asf.write_download_urls(downloadList)\n print(f\"Generated download-links.txt and topsApp.xml in {intdir}\")",
"def convert_topology(src_filename, set_backbone=True, in_place=False, split_dir=None):\n\n # Grab unit cell description (should be on first few lines:\n cryst = None\n with open(src_filename) as src:\n for line in src.readlines():\n if line.startswith('CRYST1'):\n cryst = line\n break\n\n # Read in source PDB (DEShaw original format)\n src_pdb = PdbStructure(open(src_filename))\n atoms = list(src_pdb.iter_atoms())\n topo = md.load(src_filename).top\n\n # Break into 4 segments\n segment_list = ['C1', 'C2', 'C3', 'C4']\n segment = {l:[] for l in segment_list}\n for i in atoms: \n segment[i.segment_id].append(i)\n\n # Set temperature factor (for gradual heating) \n if set_backbone:\n backbone = topo.select(\"backbone\")\n for i in range(0, len(segment['C1'])):\n if i in backbone:\n segment['C1'][i].location.temperature_factor = 1.0\n\n # Resort water segements and alias \"pseu\" to OM (tip4p forcefield)\n for wat in ['C2', 'C3']:\n segment[wat] = sorted(segment[wat], key = lambda i: i.residue_number)\n start_serial_num = min(segment[wat], key= lambda i: i.serial_number)\n for i in range(0, len(segment[wat])):\n newsn = i + start_serial_num.serial_number\n segment[wat][i].serial_number = newsn\n if segment[wat][i].get_name == 'pseu':\n segment[wat][i].set_name_with_spaces(' OM ')\n\n # FOR RE-RUNNING THE PSFGEN\n if split_dir is not None:\n for s in segment_list:\n with open(split_dir + '/%s.pdb' % s, 'w') as dest:\n for atom in segment[s]:\n _=dest.write(str(atom) + '\\n')\n\n # Writeout new file\n if in_place:\n dest = open(src_filename, 'w')\n if cryst is not None:\n dest.write(cryst)\n for s in segment_list:\n for atom in segment[s]:\n _=dest.write(str(atom) + '\\n')\n _=dest.write('END')\n dest.close()",
"def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)",
"def main(input_dir, bids_dir, exclude_fieldmaps):\n\n input_dir = Path(input_dir).absolute()\n bids_dir = Path(bids_dir).absolute()\n raw_dir = bids_dir / \"sourcedata\"\n\n copy_all_paths_to_sourcedata(input_dir, raw_dir)\n\n fix_acquisition_numbers_of_json_files_in(raw_dir)\n\n old_and_new_paths = create_dictionary_of_old_and_new_paths(raw_dir, bids_dir, exclude_fieldmaps)\n\n copy_files_to_their_new_homes(old_and_new_paths)\n\n fix_jsons_in(bids_dir)\n\n add_dataset_description_to(bids_dir)\n\n write_tsvs(raw_dir, bids_dir)\n\n print(\"Congratulations! You're BIDS-compliant, yay!\")\n print(\"To double-check, use this handy dandy BIDS validator: https://bids-standard.github.io/bids-validator/\")",
"def main():\n cursor = PGCONN.cursor()\n # track our work\n with open(\"myhucs.txt\", \"w\") as fh:\n # Change the working directory to where we have data files\n os.chdir(\"../../data/%s\" % (sys.argv[2],))\n # collect up the GeoJSONs in that directory\n fns = glob.glob(\"smpldef3m_*.json\")\n fns.sort()\n i = 0\n\n for fn in fns:\n # Save our work every 100 HUC12s,\n # so to keep the database transaction\n # at a reasonable size\n if i > 0 and i % 100 == 0:\n PGCONN.commit()\n cursor = PGCONN.cursor()\n df, snapdf = get_data(fn)\n huc12 = process(cursor, fn, df, snapdf)\n fh.write(\"%s\\n\" % (huc12,))\n i += 1\n\n # Commit the database changes\n cursor.close()\n PGCONN.commit()\n LOG.info(\"Complete.\")",
"def main():\n directed = True\n try:\n opts,args = getopt.getopt(sys.argv[1:], \"\")\n except:\n usage(sys.argv[0])\n for opt,arg in opts:\n usage(sys.argv[0])\n\n if len(args) != 5:\n usage(sys.argv[0])\n\n data_dir = args[0]\n num_samples = int(args[1])\n num_seeds = int(args[2])\n num_waves = int(args[3]) - 1 # -1 for consistency with SPNet\n outputdir = args[4]\n\n print \"directed:\", directed\n print \"number of samples:\", num_samples\n print \"number of seeds:\", num_seeds\n print \"number of waves:\", num_waves\n print \"output directory:\", outputdir\n \n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n\n sys.stdout.write('loading data from ' + data_dir + '...')\n start = time.time()\n (G, profile, colnames) = load_pokec_data(data_dir)\n print time.time() - start, 's'\n\n snap.PrintInfo(G)\n\n\n # We do not add attributes to nodes as SNAP node attribute as\n # these seem to get lost by varoius operations including subgraph\n # that we need to use, so instead maintain them just in the\n # dictionary mapping the original node ids to the attributes -\n # fortunately the original node ids are maintained by\n # GetSubGraph() so we can used these to index the profile\n # dictoinary in the subgraphs\n\n\n ## https://snap.stanford.edu/data/soc-pokec-readme.txt\n ## region:\n ## string, mostly regions in Slovakia (example: \"zilinsky kraj,\n ## kysucke nove mesto\" means county Zilina, town Kysucke Nove Mesto,\n ## Slovakia), some foreign countries (example: \"zahranicie, \n ## zahranicie - nemecko\" means foreign country Germany (nemecko)),\n ## some Czech regions (example: \"ceska republika, cz - ostravsky \n ## kraj\" means Czech Republic, county Ostrava (ostravsky kraj))\n ## We just make this a factor, looking at the output written by print\n ## below, it looks reasonable, but is is only a categorical variable\n ## allowing us to tell if two users are in the same region or not.\n ## TODO we could recode this so that we can have different variables\n ## for being in a different country, major city, etc.\n # Cannot do this:\n #profile[:][colnames['region']] = convert_to_int_cat(profile[:][colnames['region']]) # like factor in R\n # as get \"TypeError: unhashable type\" so have to do this instead:\n id_regions = [(k, p[colnames['region']]) for (k,p) in profile.iteritems()]\n id_regions_int = convert_to_int_cat([x[1] for x in id_regions])\n for i in xrange(len(id_regions)):\n profile[id_regions[i][0]][colnames['region']] = id_regions_int[i]\n\n for attr in ['region']:\n sys.stdout.write('There are %d NA for %s\\n' % ([p[colnames[attr]] for p in profile.itervalues()].count('NA'), attr))\n\n\n # get num_samples * num_seeds distinct random seed nodes (sample without replacement)\n # and convert to list of lists where each list is seed set for one sample\n allseeds = random.sample([node.GetId() for node in G.Nodes()], num_samples * num_seeds)\n seedsets = [allseeds[i:i+num_seeds] for i in range(0, len(allseeds), num_seeds)]\n\n sampledesc_filename = outputdir + os.path.sep + \"sampledesc\" + os.path.extsep + \"txt\"\n sampledesc_f = open(sampledesc_filename, 'w')\n\n for i in range(num_samples):\n sys.stdout.write( 'generating snowball sample ' + str(i+1) + '... 
' )\n start = time.time()\n # have to convert seedset to TIntV for SNAP\n seedsVec = snap.TIntV()\n for nodeid in seedsets[i]:\n seedsVec.Add(nodeid)\n Gsample = snowball_sample(G, num_waves, seedsVec)\n nodelist = list() # keep this iteration in list so we always use same order in future\n zonedict = dict() # map nodeid : zone\n for node in Gsample.Nodes():\n nodelist.append(node.GetId())\n zonedict[node.GetId()] = Gsample.GetIntAttrDatN(node.GetId(), \"zone\")\n print time.time() - start, 's'\n \n snap.PrintInfo(Gsample)\n subgraph_filename = outputdir + os.path.sep + \"subgraph\" + str(i) + os.path.extsep + \"txt\"\n write_graph_file(subgraph_filename, Gsample, nodelist)\n subzone_filename = outputdir + os.path.sep + \"subzone\" + str(i) + os.path.extsep + \"txt\"\n write_zone_file(subzone_filename, Gsample, nodelist, zonedict)\n subactor_binary_filename = outputdir + os.path.sep + \"subactorbin\" + str(i) + os.path.extsep + \"txt\"\n subactor_categorical_filename = outputdir + os.path.sep + \"subactorcat\" + str(i) + os.path.extsep + \"txt\"\n subactor_continuous_filename = outputdir + os.path.sep + \"subactorcont\" + str(i) + os.path.extsep + \"txt\"\n\n write_subactors_file_binary(subactor_binary_filename, Gsample, nodelist, profile, colnames)\n write_subactors_file_categorical(subactor_categorical_filename, Gsample, nodelist, profile, colnames)\n write_subactors_file_continuous(subactor_continuous_filename, Gsample, nodelist, profile, colnames)\n\n nodeid_filename = outputdir + os.path.sep + \"subnodeid\" + str(i) + os.path.extsep + \"txt\"\n write_subgraph_nodeids(nodeid_filename, nodelist)\n \n # format of sampledesc file is:\n # N subzone_filename subgraph_filename binary_Filename cat_filename cont_filename\n sampledesc_filename = outputdir + os.path.sep + \"sampledesc\" + os.path.extsep + \"txt\"\n sampledesc_f.write(\"%d %s %s %s %s %s\\n\" % (Gsample.GetNodes(), subzone_filename,\n subgraph_filename, subactor_binary_filename,\n subactor_categorical_filename, subactor_continuous_filename))\n\n sampledesc_f.close()",
"def transferGEE(self):\n task_id = ee.data.newTaskId()[0]\n time = self.meta['properties']['system:time_start']\n \n self.meta['properties']['system:time_start'] = ee.Date(time).getInfo()['value']\n \n request = {\n 'id':'{collectionAsset}/{assetName}'.format(collectionAsset= self.meta['collectionAsset'],assetName =self.meta['assetName']),\n 'properties':self.meta['properties'],\n 'tilesets': [{'sources': self.sources}],\n 'pyramidingPolicy':self.meta['pyramidingPolicy'].upper(),\n 'bands':self.meta['bandNames']\n }\n ee.data.startIngestion(task_id, request, True)\n return task_id",
"def convert_semeval_without_extraction(detail=True):\n\n # Parameters\n p = {\n \"data_sets\": [\"../cpa_data/Microcheck/\", \"../cpa_data/testdata/Microcheck\"],\n \"output_dir\": \"../data/semeval_mic_train_and_test_no_extraction\"\n }\n # if detail:\n # print_params(p)\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n for data_set_path in p[\"data_sets\"]:\n files = os.listdir(\"%s/input/\" % data_set_path)\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n verb_name, ext = os.path.splitext(file_name)\n file_path = \"%s/input/%s\" % (data_set_path, file_name)\n # Read cluster\n file_cluster_path = \"%s/task2/%s.clust\" % (data_set_path, verb_name)\n fh = open(file_cluster_path, \"r\")\n verb_id2cluster_id = {}\n for line in fh:\n line = line.strip()\n if line == \"\":\n continue\n verb_id, cluster_id = line.split(\"\\t\")\n verb_id2cluster_id[verb_id] = cluster_id\n fh.close()\n\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], verb_name)\n fh_out = open(out_file_path, \"w\")\n fh = open(file_path, \"r\", encoding = \"ISO-8859-1\")\n sent = \"\"\n cluster_id = -1\n for line in fh:\n line = line.strip()\n if line == \"\":\n fh_out.write(\"%s\\t%s\\n\" % (cluster_id, remove_punctuations(sent)))\n sent = \"\"\n continue\n tokens = line.split(\"\\t\")\n if len(tokens) == 3 and tokens[2] == \"v\":\n sent += \"\\t%s\\t\" % tokens[1]\n cluster_id = verb_id2cluster_id[tokens[0]]\n else:\n sent += tokens[1] + \" \"\n fh.close()\n fh_out.close()",
"def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 
'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)",
"def test_conus():\n sat = gini.GINIZFile(get_test_file(\"TIGN02\", fponly=True))\n assert sat.archive_filename() == \"GOES_SUPER_IR_201509281745.png\"\n assert sat.awips_grid() == 0\n assert sat.metadata[\"map_projection\"] == 5",
"def main():\n gcs_client = storage.Client()\n bucket_name: str = os.environ.get(\"GCS_BUCKET_NAME\")\n blob_name: str = os.environ.get(\"GCS_BLOB_NAME\")\n\n print(f\"Uploading BigEarth to bucket {bucket_name} and blob {blob_name}\")\n\n for url, blob_name in [\n (\"http://bigearth.net/downloads/BigEarthNet-v1.0.tar.gz\", blob_name),\n (\"http://bigearth.net/static/documents/patches_with_cloud_and_shadow.csv\",\n \"patches_with_cloud_and_shadow.csv\"),\n (\"http://bigearth.net/static/documents/patches_with_snow.csv\", \"patches_with_seasonal_snow.csv\"),\n ]:\n with GCSObjectStreamUploader(client=gcs_client, bucket_name=bucket_name, blob_name=blob_name) as gcs_uploader:\n with requests.get(url, stream=True) as response_stream:\n for chunk in response_stream.raw.stream(128*2000, decode_content=False):\n gcs_uploader.write(chunk)",
"def main():\n \n download_blob('data/artist_albums_lyrics_0607.json', '/tmp/artist_albums_lyrics_0607.json')\n\n with open('/tmp/artist_albums_lyrics_0607.json', 'r') as f:\n data = json.load(f)\n\n data_emb = add_embeddings(data)\n\n with open('artist_albums_lyrics_embs_0608.json', 'w') as f:\n json.dump(data_emb, f, indent=4)\n\n upload_blob('artist_albums_lyrics_embs_0608.json', folder_name='data')",
"def main_train(args):\n if args.preprocessing is not None:\n raise NotImplementedError(\"ASV Preprocessing is not yet supported\")\n if args.output is None and args.snapshot_rate is not None:\n raise RuntimeError(\"Can not save snapshots without output\")\n\n # Create and load cm network\n cm_model, cm_feature_network = create_cm_network(args)\n cm_state_dict = torch.load(args.cm_model)\n # Remove the bonafide_average vector stored with\n # CM models\n average_bonafide = cm_state_dict.pop(\"average_bonafide\")\n # Do not load parameters if so desired\n if not args.from_scratch:\n cm_model.load_state_dict(cm_state_dict)\n # Get CM embedding size we are about to feed to ASV system\n cm_embedding_size = average_bonafide.shape[0]\n\n # Create and load ASV network\n asv_model, asv_feature_network = create_asv_network(XVECTOR_FEATURE_SIZE, args)\n asv_state_dict = torch.load(args.asv_model)\n # Remove any preprocessing steps\n preprocessing_parameters = {\n \"centering_mean\": asv_state_dict.pop(\"centering_mean\", None),\n \"lda\": asv_state_dict.pop(\"lda\", None)\n }\n if not args.from_scratch:\n asv_model.load_state_dict(asv_state_dict)\n\n asv_features, cm_features, speaker_labels, is_targets, is_spoofs = load_joint_train_data_asvspoof(\n args.joint_filelist, args.joint_asv_directory, args.joint_cm_directory,\n )\n\n # Move CM features to cuda already\n numpy_to_torch_cuda = lambda arr: torch.from_numpy(arr).float().cuda()\n cm_features = list(map(numpy_to_torch_cuda, cm_features))\n\n # Split bonafide spoof samples to a separate list for\n # saving CM model\n bonafide_cm_features = [cm_features[i] for i in range(len(cm_features)) if not is_spoofs[i]]\n\n # Preprocess data a little bit: Create a dictionary\n # that maps speaker labels to indeces where speaker has\n # bona fide samples\n speaker_label_to_indeces = {}\n for i, (speaker_label, is_target) in enumerate(zip(speaker_labels, is_targets)):\n if is_target:\n # Speaker label and input features match,\n # add this index to list of valid samples for that\n # speaker\n speaker_label_to_indeces[speaker_label] = (\n speaker_label_to_indeces.get(speaker_label, []) + [i]\n )\n\n # Also separate indeces of nontarget/target trials\n is_targets = np.array(is_targets)\n target_indeces = np.where(is_targets)[0]\n nontarget_indeces = np.where(~is_targets)[0]\n\n all_parameters = list(asv_model.parameters())\n all_parameters.extend(list(cm_model.parameters()))\n # SGD is stabler than Adam, but need to see how many epochs it takes to train\n optimizer = None\n if args.optimizer == \"adam\":\n optimizer = torch.optim.Adam(all_parameters, weight_decay=args.l2_weight, lr=args.lr)\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(all_parameters, weight_decay=args.l2_weight, lr=args.lr)\n\n num_samples = len(asv_features)\n total_iterations = int(args.epochs * (num_samples // BATCH_SIZE))\n\n progress_bar = tqdm(total=total_iterations)\n loss_deque = deque(maxlen=100)\n\n loss_function = LOSS_FUNCTIONS[args.loss]\n\n for update_i in range(total_iterations):\n (cm_inputs, asv_inputs), targets, is_spoof = create_joint_sample_batch(\n asv_features,\n cm_features,\n speaker_label_to_indeces,\n target_indeces,\n nontarget_indeces,\n is_spoofs,\n BATCH_SIZE\n )\n\n asv_predictions = asv_model(*asv_inputs)\n cm_predictions = cm_model(*cm_inputs)\n\n # Predictions are logits, turn into probablities\n asv_predictions = torch.sigmoid(asv_predictions)\n cm_predictions = torch.sigmoid(cm_predictions)\n\n # Loss should return a scalar value we then 
backprop through\n # and update parameters to minimize it\n loss = loss_function(asv_predictions, cm_predictions, targets, is_spoof, args)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_deque.append(loss.item())\n average_loss = np.mean(loss_deque)\n progress_bar.update(1)\n progress_bar.set_description(\"Loss: {:2.5f}\".format(average_loss))\n progress_bar.refresh()\n\n if args.snapshot_rate is not None and (update_i % args.snapshot_rate) == 0:\n save_models(\n args.output + \"_updates_{}\".format(update_i),\n asv_model,\n preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n )\n\n\n if args.output is not None:\n save_models(\n args.output,\n asv_model,\n preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n )",
"def main(s):\n w = s.wildcards\n i = s.input\n\n sample = f\"{w.sample}_{w.tumor}-{w.normal}\"\n show_output(f\"Loading {i.CNV} of sample {sample} for ASCAT conversion\")\n cnv_df = pd.read_csv(i.CNV, sep=\"\\t\", compression=\"gzip\")\n write_ASCAT(cnv_df, sample=sample, outpath=f\"CNV/{w.sample}/pre\")",
"def main(dataset_path, output_path):\n print(\"converting all snirf files in the input dataset to nwb...\")\n print(f\"dataset directory: {dataset_path}\")\n print(f\"output directory: {output_path}\")\n print()\n\n for subject_id in list_subject_ids(dataset_path):\n convert_subject_snirf_to_nwb(\n input_root=dataset_path, output_root=output_path, subject_id=subject_id\n )\n\n print()\n print(\"Conversion successful!\")",
"def main():\n \n print('=== nyc taxi to airport - step 3 clean data')\n\n if os.path.exists(output_file):\n print(\"output file exists:\", output_file)\n print(\"skipping\")\n return\n\n df = load_data(input_file)\n df = clean_data(df)\n save_as_pickle_gz(df, output_file)\n \n print('done')"
] |
[
"0.5408492",
"0.53339535",
"0.509573",
"0.5013642",
"0.49098614",
"0.490937",
"0.4868743",
"0.48444477",
"0.4825182",
"0.48239592",
"0.48233828",
"0.48192972",
"0.4798874",
"0.4790374",
"0.47887483",
"0.47765023",
"0.47627252",
"0.4761838",
"0.4724164",
"0.4715205",
"0.47145143",
"0.47044793",
"0.4693186",
"0.46694383",
"0.46568146",
"0.46546865",
"0.46295184",
"0.46227098",
"0.46214756",
"0.46136642"
] |
0.5364614
|
1
|
Affinity map masking block by block. The block coordinates should be aligned with cloud storage.
|
def create_mask_affinity_map_tasks(task_queue, aff_input_layer_path, aff_output_layer_path,
                                   aff_mip, mask_layer_path, mask_mip,
                                   output_block_start, output_block_size, grid_size):
    # Relies on tqdm, cloudvolume (CloudVolume, Bbox) and the project-defined
    # MaskAffinitymapTask / OPERATOR_CONTACT being importable in the surrounding module.
    # Walk the output grid; each (z, y, x) index addresses one output block whose
    # bounds are aligned with the block coordinates used in cloud storage.
    for z in tqdm(range(grid_size[0]), desc='z loop'):
        for y in range(grid_size[1]):
            for x in range(grid_size[2]):
                # block bounds per axis: start + index * block_size
                output_bounds = Bbox.from_slices(tuple(
                    slice(start + idx * size, start + (idx + 1) * size)
                    for start, idx, size in zip(output_block_start, (z, y, x), output_block_size)))
                task = MaskAffinitymapTask(
                    aff_input_layer_path=aff_input_layer_path,
                    aff_output_layer_path=aff_output_layer_path,
                    aff_mip=aff_mip,
                    mask_layer_path=mask_layer_path,
                    mask_mip=mask_mip,
                    output_bounds=output_bounds,
                )
                task_queue.insert(task)
    task_queue.wait()
    # Record provenance on the masked output volume so the processing step is traceable.
    vol = CloudVolume(aff_output_layer_path, mip=aff_mip)
    vol.provenance.processing.append({
        'method': {
            'task': 'MaskAffinitymapTask',
            'aff_input_layer_path': aff_input_layer_path,
            'aff_output_layer_path': aff_output_layer_path,
            'aff_mip': aff_mip,
            'mask_layer_path': mask_layer_path,
            'mask_mip': mask_mip,
            'output_block_start': output_block_start,
            'output_block_size': output_block_size,
            'grid_size': grid_size,
        },
        'by': OPERATOR_CONTACT,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
    vol.commit_provenance()
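
# Minimal usage sketch (kept as comments so this field stays plain code). All names and
# values below are illustrative assumptions: `tq` stands for any queue object exposing
# insert() and wait(), and the layer paths / block geometry are made up.
#
# create_mask_affinity_map_tasks(
#     tq,
#     aff_input_layer_path='gs://bucket/affinity/raw',
#     aff_output_layer_path='gs://bucket/affinity/masked',
#     aff_mip=1,
#     mask_layer_path='gs://bucket/mask',
#     mask_mip=3,
#     output_block_start=(0, 0, 0),
#     output_block_size=(128, 1024, 1024),
#     grid_size=(4, 8, 8),
# )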
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calculate_mapping(self, mask):\n K, F, _ = mask.shape\n\n # (K, F, T)\n features = mask / np.linalg.norm(mask, axis=-1, keepdims=True)\n\n mapping = np.repeat(np.arange(K)[:, None], F, axis=1)\n\n for iterations, start, end in self.alignment_plan:\n for _ in range(iterations):\n # (K, T)\n centroid = np.sum(features[:, start:end, :], axis=1)\n centroid /= np.linalg.norm(centroid, axis=-1, keepdims=True)\n\n break_flag = False\n for f in range(start, end):\n reverse_permutation = self._align_segment(\n features[:, f, :], centroid,\n )\n if not (reverse_permutation == list(range(K))).all():\n break_flag = True\n features[:, f, :] = features[reverse_permutation, f, :]\n mapping[:, f] = mapping[reverse_permutation, f]\n if break_flag:\n break\n\n return mapping",
"def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. \n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities",
"def GenerateMapAffinity(img, nb_vertex, pointsInterest, objects_centroid, scale):\n\n # Apply the downscale right now, so the vectors are correct.\n img_affinity = Image.new(img.mode, (int(img.size[0] / scale), int(img.size[1] / scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2, int(img.size[1] / scale), int(img.size[0] / scale)))\n\n for i_pointsImage in range(len(pointsInterest)):\n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0] / scale),\n int(img.size[1] / scale),\n tuple((np.array(pointsImage[i_points]) / scale).tolist()),\n tuple((np.array(center) / scale).tolist()),\n img_affinity=img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair) / 2\n\n # Normalizing\n v = affinities[i_points].numpy()\n\n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero] /= norms[nonzero]\n yvec[nonzero] /= norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec], [yvec]]))\n affinities = torch.cat(affinities, 0)\n\n return affinities",
"def mask(self, row_labels, col_labels):\n log = get_logger()\n self._is_debug(log) and log.debug(f\"ENTER::Partition.mask::{self._identity}\")\n new_obj = super().mask(row_labels, col_labels)\n if isinstance(row_labels, slice) and isinstance(self._length_cache, Future):\n if row_labels == slice(None):\n # fast path - full axis take\n new_obj._length_cache = self._length_cache\n else:\n new_obj._length_cache = DaskWrapper.deploy(\n func=compute_sliced_len, f_args=(row_labels, self._length_cache)\n )\n if isinstance(col_labels, slice) and isinstance(self._width_cache, Future):\n if col_labels == slice(None):\n # fast path - full axis take\n new_obj._width_cache = self._width_cache\n else:\n new_obj._width_cache = DaskWrapper.deploy(\n func=compute_sliced_len, f_args=(col_labels, self._width_cache)\n )\n self._is_debug(log) and log.debug(f\"EXIT::Partition.mask::{self._identity}\")\n return new_obj",
"def _mask(self, map_):\n return None",
"def _set_ghost_mask(self, rdd):\n tau, N, dom_mins, dom_maxs = self.tau, self.N, self.dom_mins, self.dom_maxs\n container_mins, container_maxs = self.container_mins, self.container_maxs\n buff_mins, buff_maxs = self.buff_mins, self.buff_maxs\n\n def ghost_map_wrapper(iterator): \n for arr in iterator: \n ghost_mask(arr, tau, N, container_mins, container_maxs, \n buff_mins, buff_maxs, dom_mins, dom_maxs)\n yield arr\n\n return rdd.mapPartitions(ghost_map_wrapper, preservesPartitioning=True)",
"def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm",
"def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap,\n mask_img=None):\n seeds = list(seeds)\n\n # Compute world coordinates of all in-mask voxels.\n if niimg is None:\n mask, affine = masking._load_mask_img(mask_img)\n # Get coordinate for all voxels inside of mask\n mask_coords = np.asarray(np.nonzero(mask)).T.tolist()\n X = None\n\n elif mask_img is not None:\n affine = niimg.affine\n mask_img = check_niimg_3d(mask_img)\n mask_img = image.resample_img(\n mask_img,\n target_affine=affine,\n target_shape=niimg.shape[:3],\n interpolation='nearest',\n )\n mask, _ = masking._load_mask_img(mask_img)\n mask_coords = list(zip(*np.where(mask != 0)))\n\n X = masking._apply_mask_fmri(niimg, mask_img)\n\n elif niimg is not None:\n affine = niimg.affine\n if np.isnan(np.sum(_safe_get_data(niimg))):\n warnings.warn(\n 'The imgs you have fed into fit_transform() contains NaN '\n 'values which will be converted to zeroes.'\n )\n X = _safe_get_data(niimg, True).reshape([-1, niimg.shape[3]]).T\n else:\n X = _safe_get_data(niimg).reshape([-1, niimg.shape[3]]).T\n\n mask_coords = list(np.ndindex(niimg.shape[:3]))\n\n else:\n raise ValueError(\"Either a niimg or a mask_img must be provided.\")\n\n # For each seed, get coordinates of nearest voxel\n nearests = []\n for sx, sy, sz in seeds:\n nearest = np.round(image.resampling.coord_transform(\n sx, sy, sz, np.linalg.inv(affine)\n ))\n nearest = nearest.astype(int)\n nearest = (nearest[0], nearest[1], nearest[2])\n try:\n nearests.append(mask_coords.index(nearest))\n except ValueError:\n nearests.append(None)\n\n mask_coords = np.asarray(list(zip(*mask_coords)))\n mask_coords = image.resampling.coord_transform(\n mask_coords[0], mask_coords[1], mask_coords[2], affine\n )\n mask_coords = np.asarray(mask_coords).T\n\n clf = neighbors.NearestNeighbors(radius=radius)\n A = clf.fit(mask_coords).radius_neighbors_graph(seeds)\n A = A.tolil()\n for i, nearest in enumerate(nearests):\n if nearest is None:\n continue\n\n A[i, nearest] = True\n\n # Include the voxel containing the seed itself if not masked\n mask_coords = mask_coords.astype(int).tolist()\n for i, seed in enumerate(seeds):\n try:\n A[i, mask_coords.index(list(map(int, seed)))] = True\n except ValueError:\n # seed is not in the mask\n pass\n\n sphere_sizes = np.asarray(A.tocsr().sum(axis=1)).ravel()\n empty_spheres = np.nonzero(sphere_sizes == 0)[0]\n if len(empty_spheres) != 0:\n raise ValueError(f'These spheres are empty: {empty_spheres}')\n\n if (not allow_overlap) and np.any(A.sum(axis=0) >= 2):\n raise ValueError('Overlap detected between spheres')\n\n return X, A",
"def self_affinity(self, sequence):\n pass",
"def object_mask(self):\n\n # Region file directory files\n if isinstance(self._region_file_dir, list):\n reg_files = {self._keyfunct(f): f for f in chain.from_iterable(glob.glob(f'{reg_dir}/*.reg')\n for reg_dir in self._region_file_dir)}\n else:\n reg_files = {self._keyfunct(f): f for f in glob.glob(f'{self._region_file_dir}/*.reg')}\n\n # Select out the IDs of the clusters needing additional masking\n clusters_to_mask = set(reg_files).intersection(self._catalog_dictionary)\n\n for cluster_id in clusters_to_mask:\n cluster_info = self._catalog_dictionary.get(cluster_id, None)\n region_file = reg_files.get(cluster_id, None)\n\n pixel_map_path = cluster_info['cov_mask_path']\n\n # Read in the coverage mask data and header.\n good_pix_mask, header = fits.getdata(pixel_map_path, header=True, ignore_missing_end=True, memmap=False)\n\n # Read in the WCS from the coverage mask we made earlier.\n w = WCS(header)\n\n try:\n assert w.pixel_scale_matrix[0, 1] == 0.\n pix_scale = (w.pixel_scale_matrix[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n except AssertionError:\n cd = w.pixel_scale_matrix\n _, eig_vec = np.linalg.eig(cd)\n cd_diag = np.linalg.multi_dot([np.linalg.inv(eig_vec), cd, eig_vec])\n pix_scale = (cd_diag[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n\n # Open the regions file and get the lines containing the shapes.\n with open(region_file, 'r') as region:\n objs = [ln.strip() for ln in region\n if ln.startswith('circle') or ln.startswith('box') or ln.startswith('ellipse')]\n\n # For each shape extract the defining parameters and define a path region.\n shapes_to_mask = []\n for mask in objs:\n\n # For circle shapes we need the center coordinate and the radius.\n if mask.startswith('circle'):\n # Parameters of circle shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region radius in arcseconds\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n # \"0\" is to correct the pixel coordinates to the right origin for the data.\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape.\n shape = Path.circle(center=cent_xy, radius=params[2] / pix_scale)\n\n # For the box we'll need...\n elif mask.startswith('box'):\n # Parameters for box shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region width in arcseconds\n # params[3] : region height in arcseconds\n # params[4] : rotation of region about the center in degrees\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n cent_x, cent_y = w.wcs_world2pix(params[0], params[1], 0)\n\n # Vertices of the box are needed for the path object to work.\n verts = [[cent_x - 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)],\n [cent_x - 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)]]\n\n # For rotations of the box.\n rot = Affine2D().rotate_deg_around(cent_x, cent_y, degrees=params[4])\n\n # Generate the mask shape.\n shape = Path(verts).transformed(rot)\n\n elif mask.startswith('ellipse'):\n # Parameters for ellipse shape are as follows\n # params[0] : region center RA in degrees\n # params[1] : 
region center Dec in degrees\n # params[2] : region semi-major axis in arcseconds\n # params[3] : region semi-minor axis in arcseconds\n # params[4] : rotation of region about the center in degrees\n # Note: For consistency, the semi-major axis should always be aligned along the horizontal axis\n # before rotation\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape\n shape = Ellipse(cent_xy, width=params[2] / pix_scale, height=params[3] / pix_scale, angle=params[4])\n shape = shape.get_path()\n\n # Return error if mask shape isn't known.\n else:\n raise KeyError(\n f'Mask shape is unknown, please check the region file of cluster: {region_file} {mask}')\n\n shapes_to_mask.append(shape)\n\n # Check if the pixel values are within the shape we defined earlier.\n # If true, set the pixel value to 0.\n pts = list(product(range(w.pixel_shape[0]), range(w.pixel_shape[1])))\n\n shape_masks = np.array(\n [shape.contains_points(pts).reshape(good_pix_mask.shape) for shape in shapes_to_mask])\n\n # Combine all the shape masks into a final object mask, inverting the boolean values so we can multiply\n # our mask with our existing good pixel mask\n total_obj_mask = ~np.logical_or.reduce(shape_masks)\n\n # Apply the object mask to the existing good pixel mask\n good_pix_mask *= total_obj_mask.astype(int)\n\n # Write the new mask to disk overwriting the old mask.\n new_mask_hdu = fits.PrimaryHDU(good_pix_mask, header=header)\n new_mask_hdu.writeto(pixel_map_path, overwrite=True)",
"def mask_sparse(self, threshold=10):\n self.MaskPrefix = 's' + self.MaskPrefix\n print('Masking pixels that do not have at least {0} coherent values'.format(threshold))\n # each pixel assigned an integer corresponding to # of igrams where coherent\n # NOTE: save coverage map if it doesn't exist already\n coverage = self.get_coverage()\n sparse = ma.masked_less(coverage, threshold)\n for ig in self.Set:\n igram = self.load_ma(ig)\n igram[sparse.mask] = ma.masked\n self.save_ma(ig, igram)\n print('Done')",
"def apply_mapping(mask, mapping):\n _, F = mapping.shape\n aligned_mask = np.zeros_like(mask)\n for f in range(F):\n aligned_mask[:, f, :] = mask[mapping[:, f], f, :]\n return aligned_mask",
"def applyMat(my_map, linsys_setup):\n \n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n \n \n\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n\n #Always apply beam * precond\n beam_prec=[]\n\n for f in range(nFreq):\n beam_prec+=[beams[f][:,:ny/2+1]*precond_2d[:,:ny/2+1]]\n precond_2d=precond_2d[:,:ny/2+1]\n power_2d=power_2d[:,:ny/2+1]\n \n ksz = False\n if len(clumaps) == 2: ksz = True\n \n # Routines to perform block matrix multiplication defined in Eriksen Eq. 19\n \n def apply_cmb_cmb(d0):\n \"\"\"\n Apply (S^-1 + A N^-1 A) x\n \"\"\"\n d1 = d0.copy()\n d1 = numpy.reshape(d1,(nx,ny))\n a_l = fft.rfft(d1,axes=[-2,-1])\n \n c_l = 0\n for f in range(nFreq):\n\n b_l = a_l * beam_prec[f]\n d2 = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n d2 *= ninvs[f]\n b_l = fft.rfft(d2,axes=[-2,-1])\n c_l += b_l * beam_prec[f]\n \n d2 = fft.irfft(c_l,axes=[-2,-1],normalize=True)\n d1 = fft.irfft(precond_2d**2 * a_l/power_2d,axes=[-2,-1],normalize=True)\n \n d2 += d1\n \n return d2.reshape((nx*ny,))\n \n \"\"\"\n def apply_tsz_tsz(d0): # DONE\n \\\"\"\"\n Apply (F^T A^T N^-1 A F) x\n \\\"\"\"\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] * g_nu[freq]**2. \\\n * clumaps[0][ic][freq] * clumaps[0][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n return d1\n \n def apply_ksz_ksz(d0): # DONE\n \\\"\"\"\n Apply (K^T A^T N^-1 A K) x\n \\\"\"\"\n # FIXME: Missing factor of ivcov\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. 
using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] \\\n * clumaps[1][ic][freq] * clumaps[1][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n d1 += numpy.dot(ivcov, d0) # Add prior term\n return d1\n \"\"\"\n \n def apply_cmb_foreground_block(dc, dm, dt, dk=None):\n \"\"\"\n Apply the CMB x (Monopole + TSZ + KSZ) terms in one block:\n (A^T N^-1 A T) x_mono\n (A^T N^-1 A F) x_tsz\n (A^T N^-1 A K) x_ksz\n \n (T^T A^T N^-1 A) x_cmb\n (F^T A^T N^-1 A) x_cmb\n (K^T A^T N^-1 A) x_cmb\n \"\"\"\n ksz = False\n if dk is not None: ksz = True\n \n # (A^T N^-1 A T) x_mono; (A^T N^-1 A F) x_tsz; (A^T N^-1 A K) x_ksz\n b_lt = 0; b_lk = 0; b_lm = 0\n for f in range(nFreq):\n mct = 0; mck = 0\n for ic in range(nCluster):\n mct += dt[ic] * ninvs[f] * clumaps[0][ic][f] * g_nu[f]\n if ksz: mck += dk[ic] * ninvs[f] * clumaps[1][ic][f]\n \n b_lm += fft.rfft(dm * ninvs[f],axes=[-2,-1]) * beam_prec[f]\n b_lt += fft.rfft(mct,axes=[-2,-1]) * beam_prec[f]\n if ksz: b_lk += fft.rfft(mck,axes=[-2,-1]) * beam_prec[f]\n\n mcm = fft.irfft(b_lm,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n mct = fft.irfft(b_lt,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n if ksz: mck = fft.irfft(b_lk,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n \n # (T^T A^T N^-1 A) x_cmb; (F^T A^T N^-1 A) x_cmb; (K^T A^T N^-1 A) x_cmb\n mc = dc.copy().reshape((nx,ny))\n a_l = fft.rfft(mc,axes=[-2,-1])\n mtc = numpy.zeros(nCluster)\n mkc = numpy.zeros(nCluster)\n mmc = 0\n for f in range(nFreq):\n b_l = a_l * beam_prec[f]\n mc = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n mmc += numpy.sum(mc * ninvs[f])\n for ic in range(nCluster):\n mtc[ic] += numpy.sum(mc * ninvs[f] * clumaps[0][ic][f] * g_nu[f])\n if ksz: mkc[ic] += numpy.sum(mc * ninvs[f] * clumaps[1][ic][f])\n \n if ksz: return mct, mcm, mck, mtc, mmc, mkc\n return mct, mcm, mtc, mmc\n \n \n def apply_foreground_block(m0, t0, k0=None):\n \"\"\"\n Apply the TSZ + KSZ + Monopole terms in one block:\n [ (T^T A^T N^-1 A F) (T^T A^T N^-1 A K) (T^T A^T N^-1 A T) ] (x_mono)\n [ (F^T A^T N^-1 A F) (F^T A^T N^-1 A K) (F^T A^T N^-1 A T) ] (x_tsz)\n [ (K^T A^T N^-1 A F) (K^T A^T N^-1 A K) (K^T A^T N^-1 A T) ] (x_ksz)\n \"\"\"\n ksz = True if k0 is not None else False\n \n dtt, dkk, dtk, dkt = [numpy.zeros(nCluster) for i in range(4)]\n mtt, mkk, mtk, mkt = [numpy.zeros((nCluster, nCluster)) for i in range(4)]\n dmm, dmk, dmt = [0 for i in range(3)]\n dkm, dtm = [numpy.zeros(nCluster) for i in range(2)]\n \n # TODO: This could probably be more efficient (e.g. using np.outer)\n for f in range(nFreq):\n dmm += numpy.sum(ninvs[f]) * m0\n \n # Loop through clusters\n for ic in range(nCluster):\n dmt += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * t0[ic] )\n dtm[ic] += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * m0 )\n if ksz:\n dmk += numpy.sum( ninvs[f] * clumaps[1][ic][f] * k0[ic] )\n dkm[ic] += numpy.sum( ninvs[f] * clumaps[1][ic][f] * m0 )\n \n for jc in range(0, ic+1):\n mtt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f]**2. 
\\\n * clumaps[0][ic][f] * clumaps[0][jc][f] )\n if ksz:\n mkk[ic,jc] = numpy.sum( ninvs[f] \\\n * clumaps[1][ic][f] * clumaps[1][jc][f] )\n mtk[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[0][ic][f] * clumaps[1][jc][f] )\n mkt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[1][ic][f] * clumaps[0][jc][f] )\n # Mirror indices\n mtt[jc,ic] = mtt[ic,jc]\n if ksz:\n mkk[jc,ic] = mkk[ic,jc]\n mtk[jc,ic] = mtk[ic,jc]\n mkt[jc,ic] = mkt[ic,jc]\n \n # Add total contribs. for this band\n dtt += numpy.dot(mtt, t0)\n if ksz:\n dkk += numpy.dot(mkk, k0)\n dtk += numpy.dot(mtk, k0)\n dkt += numpy.dot(mkt, t0)\n \n if ksz: return dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt\n return dtt, dmm, dtm, dmt\n \n # Apply block matrix multiplications and return\n # FIXME: What if KSZ not used?\n x0 = my_map[:nx*ny]\n x1 = my_map[nx*ny:nx*ny+1]\n x2 = my_map[nx*ny+1:nx*ny+nCluster+1]\n if ksz: x3 = my_map[nx*ny+nCluster+1:nx*ny+2*nCluster+1]\n \n # Multiply input vector in blocks\n #t=time.time()\n dcc = apply_cmb_cmb(x0)\n #print 'CMB', time.time()-t\n if ksz:\n dct, dcm, dck, dtc, dmc, dkc = apply_cmb_foreground_block(x0, x1, x2, x3)\n dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt = apply_foreground_block(x1, x2, x3)\n x_new_0 = dcc + dct + dck + dcm\n x_new_1= dmc + dmt + dmk + dmm\n x_new_2 = dtc + dtt + dtk + dtm\n x_new_3 = dkc + dkt + dkk + dkm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2, x_new_3))\n else:\n #t=time.time()\n dct, dcm, dtc, dmc = apply_cmb_foreground_block(x0, x1, x2)\n #print 'CMB-F', time.time()-t\n \n #t=time.time()\n dtt, dmm, dtm, dmt = apply_foreground_block(x1, x2)\n #print 'F', time.time()-t\n \n x_new_0 = dcc + dct + dcm\n x_new_1 = dmc + dmt + dmm\n x_new_2 = dtc + dtt + dtm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2))\n\n\n#sys.exit()\n return x_new",
"def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)",
"def pre_process_for_classifying(block, scale=(0, 1, 1024)):\n print('\\nClassifying Data')\n lorentz_arrays_list = block[1]\n data_arrays_list = block[2]\n block_size = len(lorentz_arrays_list)\n cluster_labels = np.empty((0, 1))\n cluster_data = np.empty((0, scale[2]))\n pool = mp.Pool(mp.cpu_count())\n results = pool.map(\n classify, [\n (i, lorentz_arrays_list, data_arrays_list) for i in\n range(block_size)])\n pool.close()\n for result in results:\n cluster_labels = np.append(cluster_labels, result[0], axis=0)\n cluster_data = np.append(cluster_data, result[1], axis=0)\n return cluster_labels, cluster_data",
"def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map",
"def BlockToMatrix(self):\n for h in range(height):\n for w in range(width):\n if self.matrix[h][w] == 2:\n self.matrix[h][w] = 0\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 2",
"def set_blockmask(self, mask: Iterable):\n\n block_ids = sorted(set(mask))\n\n if len(block_ids) != self.n_blocks_:\n raise IndexError(f'size mismatch between n_block: {self.n_blocks_} and '\n f'a number of specified blockmask: n={len(block_ids)}')\n\n # verify whether the mask is correct sequence of indecies\n for i, id in enumerate(block_ids):\n if i != id:\n raise ValueError(f'mask must be a sequence of indeies, i.e., [1,2,...,N]')\n\n self.block_masks_ = numpy.asarray(mask, dtype=int)",
"def apply(self, mode='lateral'):\n num_lat_slices = self.img3d.shape[0]\n num_cor_slices = self.img3d.shape[2]\n bin_mask = np.zeros(self.mask3d.shape)\n x,y,z = np.where(self.mask3d==self.vertebra_id)\n bin_mask[np.min(x):np.max(x), np.min(y):np.max(y), np.min(z):np.max(z)] = 1\n if mode=='lateral' or mode=='fuse':\n mask_lat = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_lat = np.zeros(self.img3d.shape)\n binary_lat = np.zeros(self.mask3d.shape)\n # for each lateral slice\n for idx in range(num_lat_slices):\n img_slice, mask_slice = np.copy(self.img3d[idx, :, :]), np.copy(self.mask3d[idx, :, :])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_lat[:,idx, :, :] = self.get_one_hot(mask_slice)\n img_lat[idx, :, :] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y)\n mask_lat[:,idx, :, :] = inpainted_mask\n img_lat[idx,:, :] = inpainted_img\n binary_lat[idx,:,:] = binary_mask\n\n\n if mode=='coronal' or mode=='fuse':\n mask_cor = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_cor = np.zeros(self.img3d.shape)\n binary_cor = np.zeros(self.mask3d.shape)\n # for each coronal slice\n for idx in range(num_cor_slices):\n img_slice, mask_slice = np.copy(self.img3d[:, :, idx]), np.copy(self.mask3d[:, :, idx])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_cor[:, :, :, idx] = self.get_one_hot(mask_slice)\n img_cor[:, :, idx] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n # else remove fractured vertebra and inpaint\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y, 'coronal')\n mask_cor[:, :, :, idx] = inpainted_mask\n img_cor[:, :, idx] = inpainted_img\n binary_cor[:,:,idx] = binary_mask\n \n # return to a one channel mask and convert labels back\n if mode=='lateral':\n mask_lat = np.argmax(mask_lat, axis=0)\n mask_lat = self.map_class_to_vert(mask_lat)\n self.mask3d = mask_lat\n self.img3d = img_lat\n elif mode=='coronal':\n mask_cor = np.argmax(mask_cor, axis=0)\n mask_cor = self.map_class_to_vert(mask_cor)\n self.mask3d = mask_cor\n self.img3d = img_cor\n elif mode=='fuse':\n mask_fuse = mask_cor*0.5+mask_lat*0.5\n mask_fuse = np.argmax(mask_fuse, axis=0)\n mask_fuse = self.map_class_to_vert(mask_fuse)\n self.mask3d = mask_fuse\n self.img3d = (img_lat+img_cor)/2\n \n # save result\n self.mask3d = self.mask3d.astype(np.uint8)\n self.img3d = self.img3d.astype(np.float32)\n \n # put back if we padded and cropped\n if self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, :] = self.img3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[:,self.ymin:self.ymax, :] = self.mask3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n elif self.padz and not self.padx:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.img3d[:,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.mask3d[:,:,self.zcrop1:-self.zcrop2]\n elif not self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = 
self.img3d[self.xcrop1:-self.xcrop2,:,:]\n self.orig_mask3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d[self.xcrop1:-self.xcrop2,:,:]\n else:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d\n \n img = return_scan_to_orig(self.orig_img3d, self.mask_affine, self.mask_header, self.zooms)\n nib.save(img, self.inpainted_img_path)\n\n mask_fuse = return_scan_to_orig(self.orig_mask3d, self.mask_affine, self.mask_header, self.zooms, np.uint8)\n nib.save(mask_fuse, self.inpainted_mask_path)\n print('Inpaint mask and image saved at: ', self.inpainted_mask_path, self.inpainted_img_path)",
"def mask_weight(region_key,lon,lat):\r\n\t\tlon_res = lon[1] - lon[0];lat_res = lat[1] - lat[0];\r\n\t\tlons,lats = np.meshgrid(lon,lat)\r\n\t\tarea = AreaWeight(lons,lons+lon_res,lats,lats+lat_res)\r\n\r\n\t\t##OCEAN_MASKS FOR COUNTRIES\r\n\t\tocean_mask = sio.loadmat('/home/s1667168/coding/python/external_data/Euro_USA_AUS_BRICS_STA_720_360.mat')\r\n\t\tlon_mask = ocean_mask['lon'][0,:];\r\n\t\tlat_mask = ocean_mask['lat'][0,:];\r\n\t\tbox_region_dic={'All':[0,360,-90,90],'ASIA':[65,145,5,45],'US':[240,290,30,50],'ARCTIC':[0,360,60,90],'TROPICS':[0,360,-28,28],'EUROPE':[0,40,30,70],}\r\n\t\tif (region_key == 'USA' or region_key == 'Europe' or region_key == 'India' or region_key == 'China' or region_key == 'GloLand'):\r\n\t\t\tmask= ocean_mask[region_key][:]\r\n\t\telif region_key in box_region_dic:\r\n\t\t\tmask= ocean_mask['All'][:]\r\n\t\t\tbox = box_region_dic[region_key]\r\n\t\t\tmask = box_clip(box[0],box[1],box[2],box[3],lon_mask,lat_mask,mask)\r\n\t\telse:\r\n\t\t\tprint \"error region name\"\r\n\t\t\r\n\t\t# interpolate from 360*720 to 192*288\r\n\t\tmask[np.isnan(mask)]=0;\tmask[mask>0]=1;\r\n\t\tf = interp2d(lon_mask, lat_mask, mask,kind='linear'); mask = f(lon, lat);\r\n\t\t# plt.imshow(mask,origin='lower');plt.show()\r\n\t\tmask[mask >= 1] = 1;mask[mask < 1] = 0;\r\n\t\t# weight each grid cell by its area weight against the total area\r\n\t\tmask[mask==0] = np.nan\r\n\t\tmask=np.multiply(mask,area); \r\n\t\tmask_weighted = np.divide(mask,np.nansum(np.nansum(mask,axis=1),axis=0))\r\n\t\t# print np.nansum(np.nansum(mask_weighted,axis=1),axis=0)\r\n\t\treturn mask_weighted",
"def __call__(self, mask):\n mapping = self.calculate_mapping(mask)\n return self.apply_mapping(mask, mapping)",
"def num_47():\n\n def block_reshape(a, rows, cols, nodata=-1, as_masked=True):\n \"\"\" \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape\n pad = ((0, ypad), (0, xpad))\n p_with =((nodata, nodata), (nodata, nodata))\n b = np.pad(a, pad_width=pad, mode='constant', constant_values=p_with)\n w_y, w_x = w # Blocksize\n y, x = b.shape # padded array\n c = b.reshape((y//w_y, w_y, x//w_x, w_x))\n c = c.swapaxes(1, 2).reshape(-1, w_y, w_x)\n if as_masked:\n mask_val = nodata\n c = np.ma.masked_equal(c, mask_val)\n c.set_fill_value(mask_val)\n return b, c\n y, x = 5, 6\n rows, cols = [3, 4]\n nodata = -1\n a = np.arange(x*y).reshape(y,x)\n b, c = block_reshape(a, rows, cols, nodata)\n print(\"\\n{}\".format(num_47.__doc__))\n print(\"a\\n{}\\nb\\n{}\\nc\\n{}\".format(a, b, c))\n return a, b, c",
"def mask(self):",
"def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'",
"def make_inference_attention_mask_3d(source_block, target_block, pad_id):\n # mask = (target_block[:, None, :] != pad_id) * (source_block[:, :, None] != pad_id)\n return make_attention_mask_3d(source_block != pad_id, target_block != pad_id)",
"def block_edges(self, block_id, remap_local=...): # -> tuple[Unknown, Unknown, Unknown]:\n ...",
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def preprocess_multicluster(adj, parts, features, y_train, train_mask, num_clusters, block_size, diag_lambda=1):\n features_batches = []\n support_batches = []\n y_train_batches = []\n train_mask_batches = []\n total_nnz = 0\n np.random.shuffle(parts)\n\n for _, st in enumerate(range(0, num_clusters, block_size)):\n pt = parts[st]\n for pt_idx in range(st + 1, min(st + block_size, num_clusters)):\n pt = np.concatenate((pt, parts[pt_idx]), axis=0)\n features_batches.append(features[pt, :])\n y_train_batches.append(y_train[pt, :])\n support_now = adj[pt, :][:, pt]\n support_batches.append(sparse_to_tuple(normalize_adj_diag_enhance(support_now, diag_lambda=diag_lambda)))\n total_nnz += support_now.count_nonzero()\n\n train_pt = []\n for newidx, idx in enumerate(pt):\n if train_mask[idx]:\n train_pt.append(newidx)\n train_mask_batches.append(sample_mask(train_pt, len(pt)))\n\n return features_batches, support_batches, y_train_batches, train_mask_batches",
"def apply_mask(self):\n for mask, param in self.masked_parameters:\n param.mul_(mask)",
"def testWarpMask(self):\n for kernelName, maskKernelName in (\n (\"bilinear\", \"bilinear\"),\n (\"lanczos3\", \"lanczos3\"),\n (\"lanczos3\", \"bilinear\"),\n (\"lanczos4\", \"lanczos3\"),\n ):\n for growFullMask in (0, 1, 3, 0xFFFF):\n self.verifyMaskWarp(\n kernelName=kernelName,\n maskKernelName=maskKernelName,\n growFullMask=growFullMask,\n )"
] |
[
"0.62302107",
"0.5893791",
"0.5769649",
"0.57137156",
"0.56408554",
"0.5601151",
"0.55934393",
"0.55531865",
"0.54844314",
"0.5449009",
"0.5445731",
"0.5406244",
"0.53493595",
"0.53390515",
"0.533312",
"0.5310451",
"0.52919894",
"0.524323",
"0.51834995",
"0.5173451",
"0.51632",
"0.51557785",
"0.5137626",
"0.51348925",
"0.51170033",
"0.51168174",
"0.51149344",
"0.50956935",
"0.50910985",
"0.50904346"
] |
0.6456307
|
0
|
Test case for card_info_lookup
|
def test_card_info_lookup(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_cards_get(self):\n pass",
"def _get_card(self, name: str) -> Dict:",
"def test_get_info(self):\n pass",
"def test_lookup_account(self):\n pass",
"def test_cards_get_list(self):\n pass",
"def get_card_info(card_number: Union[int, str]) -> dict:\n card_nums = [int(x) for x in str(card_number)]\n is_number_valid = True\n valid_length = True\n issuer = None\n reason = None\n\n # Validate by checking the following:\n # - check digit is correct\n # - iin is recognized\n # - card number has a valid length\n\n check_digit = get_check_digit(card_nums[:-1])\n if card_nums[-1] != check_digit:\n # perform first check, check digit check\n is_number_valid = False\n reason = reason_invalid_check_digit\n else:\n # perform second check, is iin recognized\n iin = None\n for i in range(6):\n iin_candidate = ''.join(str(x) for x in card_nums[:i + 1])\n if IIN.objects.filter(iin=iin_candidate).exists():\n iin = IIN.objects.get(iin=iin_candidate)\n issuer = iin.issuer\n break\n if iin is None:\n is_number_valid = False\n reason = reason_unrecognized_issuer\n\n if not reason and issuer:\n # perform final check, is length valid\n valid_length = len(card_nums) in issuer.get_valid_lengths()\n if not valid_length:\n reason = reason_invalid_length\n\n if not is_number_valid or not issuer or not valid_length:\n info = {'is_valid': False, 'reason': reason}\n else:\n major_industry = MajorIndustry.objects.get(pk=card_nums[0])\n info = {\n 'is_valid': True,\n 'mii_digit': card_nums[0],\n 'issuer_category': major_industry.issuer_category,\n 'iin': int(''.join(str(x) for x in card_nums[:6])),\n 'issuing_network': issuer.network_name,\n 'account_number': int(''.join(str(x) for x in card_nums[6:-1])),\n 'check_digit': check_digit,\n }\n\n return info",
"def match_info(info_dict):\n return True",
"def test_droid_lookup(self):\n droid_out_test_path = os.path.join(THIS_DIR, 'droid-blobs.out')\n droid_lookup = DroidLookup(droid_out_test_path)\n self.assertEqual(droid_lookup.get_entry_count(), 5)\n self.assertEqual(droid_lookup.get_puid('2806d9cd6666d6aae3934e87905fc3291c4b1693'),\n 'Unknown')\n self.assertEqual(droid_lookup.get_puid('3f4a7df345d3ccdf8bf5a6e78d1d3c59d3641772'),\n 'fmt/18')\n self.assertEqual(droid_lookup.get_puid('c37672e0ba9afef20d4f053da5c68621ed6bb507'),\n 'fmt/11')\n self.assertEqual(droid_lookup.get_puid('a2dc22c72b9a615e2114000572ea217c8e36e382'),\n 'fmt/19')\n self.assertEqual(droid_lookup.get_puid('5d2fa9ef4448099821f0a56d29c9017d50b63f7e'),\n 'fmt/19')",
"def test_lookup_entry_by_name(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertEqual(\"12345\", self.phonebook.lookup(\"Bob\"))",
"def read_lookup_table(hole_cards, lookup_table):\n sorted_hole = sorted(hole_cards)\n sorted_hole.reverse()\n card_strings = [Card.int_to_str(card) for card in sorted_hole]\n\n if card_strings[0][1] != card_strings[1][1]:\n suited = False\n else:\n suited = True\n card_strings[0] = card_strings[0][0] + 'd'\n if suited:\n card_strings[1] = card_strings[1][0] +'d'\n else:\n card_strings[1] = card_strings[1][0] +'s'\n card_strings = tuple(card_strings)\n return lookup_table[card_strings]",
"def test_get_pci_coprocessor_card_list(self):\n pass",
"def test_get_pci_coprocessor_card_by_moid(self):\n pass",
"def test_get_card(self):\n card = CardFactory()\n self.session.commit()\n resp = self.app.get('cards/1')\n data = resp.json\n\n assert resp.status_code == 200\n\n assert data['first_name'] == card.first_name\n assert data['last_name'] == card.last_name\n assert data['variety'] == card.variety",
"def get_info(self, card):\n\n try:\n\n info = subprocess.check_output(\n [\"youtube-dl\", \"-i\", \"-j\",\n f\"ytsearch: {card[0]} {card[1]} audiobook\"])\n\n self.details = json.loads(info)\n\n except:\n\n return",
"def read_card(name_card):\n\n card=Cards.Card(name_card)\n return card.info",
"def _get_card(self, name: str, method: str = 'exact') -> Dict:\n\n if not self.card:\n try:\n logger.info(f'About to request for {name} card')\n async_result = make_request.delay(url=f'{self.REQUEST[\"API\"]}{self.REQUEST[\"CARDS_ENDPOINT\"]}?{method}={name}')\n card = async_result.get()\n card_info_url = card['prints_search_uri']\n async_result = make_request.delay(url=card_info_url)\n card_info = async_result.get()\n self.card = card_info['data']\n return self.card\n except requests.HTTPError:\n try:\n logger.info(f'Unable to locate card exactly named {name}, looking into fuzzy naming...')\n self._get_card(name=name, method='fuzzy')\n except requests.HTTPError as err:\n raise err\n else:\n return self.card",
"def lookup():",
"def test_get_pay_in_details(self):\n pass",
"def test_get_details7(self):\n pass",
"def test_data_object_get_details(self):\n pass",
"def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())",
"def test_retrieve_1_by_1(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"Farming Village\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 2)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(card_stats[0]['condition'][0], 'Farming Village')\n\n self.assertEquals(card_stats[1]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[1]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')",
"def test_external_card(self):\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qCardNumber': [1234567],\n 'qCardExternalNumber': [5],\n 'qCardExternalDescription': ['Cluster 3'],\n 'qCardExternalDepositMade': [True],\n }\n }\n clone(entries)\n # Check if ExternalCard+ExternalCardLoan are correctly created\n self.assertEqual(1, ExternalCard.objects.count())\n self.assertEqual(1, ExternalCardLoan.objects.count())\n card = ExternalCard.objects.first()\n loan = ExternalCardLoan.objects.first()\n self.assertEqual(1234567, card.card_number)\n self.assertEqual(5, card.reference_number)\n self.assertEqual('Cluster 3', card.description)\n self.assertIsNone(loan.end)\n self.assertEqual('y?', loan.deposit_made)\n # Make sure that TU/e card number is NOT set!\n self.assertIsNone(Person.objects.first().tue_card_number)",
"def test_get_access_information(self):\n\n self.assertEqual(account.get_access_information('123'), account.INVALID_CODE_ERR_MSG)",
"def get_info(self, info):\r\n pass",
"def get_card (self, card):\n\t\treturn self._card",
"def test_card_value(mock_card):\n assert mock_card.value == 1",
"def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)",
"def test_set_accessory_info():\n acc = MockAccessory('Accessory')\n set_accessory_info(acc, 'name', 'model', 'manufacturer', '0000')\n\n assert len(acc.services) == 1\n serv = acc.services[0]\n\n assert serv.display_name == SERV_ACCESSORY_INFO\n assert len(serv.characteristics) == 4\n chars = serv.characteristics\n\n assert chars[0].display_name == CHAR_NAME\n assert chars[0].value == 'name'\n assert chars[1].display_name == CHAR_MODEL\n assert chars[1].value == 'model'\n assert chars[2].display_name == CHAR_MANUFACTURER\n assert chars[2].value == 'manufacturer'\n assert chars[3].display_name == CHAR_SERIAL_NUMBER\n assert chars[3].value == '0000'",
"def getCard(id):\n r = requests.get('https://api.scryfall.com/cards/multiverse/' + str(id))\n data = r.json()\n try:\n name = data['name']\n except KeyError:\n name = ''\n try:\n url = data['image_uris']['normal']\n except KeyError:\n url = ''\n return name, url"
] |
[
"0.6776132",
"0.64844984",
"0.63596356",
"0.61986995",
"0.6173423",
"0.6162049",
"0.6052326",
"0.60388124",
"0.59450364",
"0.59148693",
"0.59128034",
"0.59102505",
"0.5837992",
"0.5827626",
"0.5804711",
"0.57768077",
"0.57597846",
"0.5734827",
"0.57277155",
"0.5709747",
"0.57018083",
"0.5680859",
"0.56306535",
"0.5618926",
"0.5618565",
"0.5502146",
"0.5486541",
"0.5485821",
"0.5472889",
"0.5458942"
] |
0.93080074
|
0
|
Test case for lookup_account
|
def test_lookup_account(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_duo_account_get(self):\n pass",
"def bank_lookup_account(stub, request):\n # print(\"In method bank_lookup_account:\")\n result = stub.LookUpAccount(request) # <-- remember to check whether port is occupied!\n # from line 26 to 28 seem never gonna be reached!\n if result is None:\n return \"Unable to find record\"\n # raise AccountNotExistError\n # print(result)\n return result",
"def test_retrieve_iso20022_account_statement(self):\n pass",
"def test_client_bank_account_retrieve(self):\n pass",
"def get_account(self, account):\n \n pass",
"def test_get_accounts_names(self):\n pass",
"def test_search_by_account(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n \n self.assertEqual(account_found.username,self.new_credentials.username)",
"def test_get_account(self):\n account = Account(self.client, \"[email protected]\", {})\n\n self.assertEqual(account.email, \"[email protected]\")\n self.assertEqual(account.state, \"PA\")\n self.assertEqual(account.city, \"Philadelphia\")\n self.assertEqual(account.phone, \"123-456-7890\")\n self.assertEqual(account.tax_id, \"\")\n self.assertEqual(account.balance, 0)\n self.assertEqual(account.company, \"Linode\")\n self.assertEqual(account.address_1, \"3rd & Arch St\")\n self.assertEqual(account.address_2, \"\")\n self.assertEqual(account.zip, \"19106\")\n self.assertEqual(account.first_name, \"Test\")\n self.assertEqual(account.last_name, \"Guy\")\n self.assertEqual(account.country, \"US\")\n self.assertIsNotNone(account.capabilities)\n self.assertIsNotNone(account.active_promotions)\n self.assertEqual(account.balance_uninvoiced, 145)\n self.assertEqual(account.billing_source, \"akamai\")\n self.assertEqual(account.euuid, \"E1AF5EEC-526F-487D-B317EBEB34C87D71\")",
"def test_get_account_from_state(self):\n state = State('test-state')\n account = Account('test-account')\n state.add_account(account)\n self.assertEqual(state.get_account('test-account'), account)",
"def test_get_email_account(self):\n email_addr = self.user_id + '@' + self.email_dom\n org = '%s=%s' % (self.org_attr, self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.imap_enable: ['TRUE'],\n self.imap_mailbox: [self.user_id],\n self.imap_domain: [self.email_dom],\n self.imap_partition: [self.imap_partition_def],\n self.smtp_destination: [email_addr],\n self.smtp_enable: ['TRUE'],\n self.smtp_pri_address: [email_addr]\n }\n expected_result = [(dn, dn_info)] \n acc = SpokeEmailAccount(self.org_name, self.user_id)\n result = acc.get(self.email_addr)['data']\n self.assertEqual(result, expected_result)",
"def test_get_virtual_account_by_id(self):\n pass",
"def test_companies_company_id_data_bank_accounts_account_id_get(self):\n pass",
"def test_duo_account_list(self):\n pass",
"def test_does_exist(account, api: API, accounts_found: list):\n api.user.login.return_value = accounts_found\n assert account.does_exist() == bool(accounts_found)",
"def test_get_virtual_accounts(self):\n pass",
"def accounts():",
"def test_lookup_entry_by_name(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertEqual(\"12345\", self.phonebook.lookup(\"Bob\"))",
"def test_get_virtual_account_beneficiary(self):\n pass",
"def test_find_credentials(self):\n self.new_credentials.save_credentials()\n new_account= Credentials(\"Twitter\",\"josephat_otieno\", \"joseotis45\")\n new_account.save_credentials()\n\n found_credential= Credentials.find_credentials(\"Twitter\")\n\n self.assertEqual(found_credential.account_name,new_account.account_name)",
"def test_get_access_information(self):\n\n self.assertEqual(account.get_access_information('123'), account.INVALID_CODE_ERR_MSG)",
"def test_resource_user_resource_get_user_by_email_address_get(self):\n pass",
"def CheckAccount(self):\n \n if self.userName != '':\n res=self.helper.getAccounts(self.userName)\n if res != None:\n if res == []:\n return False\n else:\n return res\n else:\n return None",
"def test_showing_dietitian_account(self):\n\n result = self.client.get(\"/dietitian/1/account\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/dietitian/2/account\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)",
"def test_search_account(client):\n subjects, request_id = client.search_account(\"70506405335016096312945164\")\n\n assert len(subjects) != 0",
"async def test_retrieve_account_information(self):\n account_information = {\n 'broker': 'True ECN Trading Ltd',\n 'currency': 'USD',\n 'server': 'ICMarketsSC-Demo',\n 'balance': 7319.9,\n 'equity': 7306.649913200001,\n 'margin': 184.1,\n 'freeMargin': 7120.22,\n 'leverage': 100,\n 'marginLevel': 3967.58283542\n }\n client.get_account_information = AsyncMock(return_value=account_information)\n actual = await api.get_account_information()\n assert actual == account_information\n client.get_account_information.assert_called_with('accountId')",
"def accounts():\n pass",
"def __get_account(self, address):\n\t\tfor acct in self.wallet:\n\t\t\tif acct[\"address\"] == address:\n\t\t\t\treturn acct\n\t\traise ValueError(\"The given address does not exist in the bunkr-wallet\")",
"def test_get_account(client):\n request_test = client.post('/checks_and_balances/api/v1.0/account',\n json={'id': 0})\n json_data = request_test.get_json(force=True)\n\n wanted_result = {\n 'id': 0,\n 'name': u'David',\n 'surname': u'Vélez',\n 'product': u'Check Account',\n 'balance': 10.05\n }\n\n assert json_data == wanted_result",
"def chk_account(self, account_num, action):\n try:\n if account_num not in self.accounts:\n raise NoAccount\n except NoAccount:\n print(\"No account id \"+account_num+\" That exists in this bank\")\n else:\n if action == 'return':\n return self.accounts[account_num]\n elif action == 'del':\n del self.accounts[account_num]",
"def lookup():"
] |
[
"0.75243884",
"0.7460177",
"0.70570356",
"0.68598056",
"0.68192416",
"0.6809717",
"0.6791296",
"0.6757188",
"0.66275024",
"0.6598423",
"0.65628946",
"0.642495",
"0.6418375",
"0.63521814",
"0.63058287",
"0.6194276",
"0.61765236",
"0.6165734",
"0.6163305",
"0.6126774",
"0.60569924",
"0.604675",
"0.6026091",
"0.60175496",
"0.5980068",
"0.5979805",
"0.59657145",
"0.59642345",
"0.5957048",
"0.5956027"
] |
0.92692095
|
0
|
Update the Brownian motion process
|
def bm_update(self,dt,delta):
curr_coord = self.Coord
self.History.append(curr_coord)
change = self.bm_change(dt,delta)
self.Coord = curr_coord + change
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update(self):\n self.bpos_x += 3",
"def update_bias(self):\n self._bias = self._bias + self.update_bias_value",
"def update_bias(self):\n self._bias = self._bias + self.update_bias_value\n self.bias_clipping()",
"def update_b(self, theta, force=False):\n self.b = self.eval_b(self.theta)\n self.b_eval_cnt += 1",
"def bayesian_update(self):\n for p in self.parameters:\n p.bayesian_update()",
"def exercise2b(self):\n self.b2 = calibrate_image(self.b1)\n plt.axis('off')\n plt.imshow(self.b2)\n plt.show()\n misc.imsave(\"B2.png\", self.b2)\n misc.imsave(\"B2_Brightness.png\", print_brightness(self.b2))",
"def update_all(self,delta_t):\n self.update_thrust()\n self.update_climb_rate()\n self.update_height(delta_t)",
"def update_blue(self):\n self.image = self.blue_images.next_image()\n options = self.get_direction_options()\n if self.is_at_intersection() or self.last_position == (self.rect.centerx, self.rect.centery):\n self.direction = self.get_flee_direction(options)\n # print(self.internal_map[self.tile[0]][self.tile[1]])\n if self.direction == 'u' and 'u' in options:\n self.rect.centery -= self.speed\n elif self.direction == 'l' and 'l' in options:\n self.rect.centerx -= self.speed\n elif self.direction == 'd' and 'd' in options:\n self.rect.centery += self.speed\n elif self.direction == 'r' and 'r' in options:\n self.rect.centerx += self.speed\n if abs(self.blue_start - time.get_ticks()) > self.blue_interval:\n self.stop_blue_state()\n elif abs(self.blue_start - time.get_ticks()) > int(self.blue_interval * 0.5):\n if self.blink:\n self.image = self.blue_warnings.next_image()\n self.blink = False\n self.last_blink = time.get_ticks()\n elif abs(self.last_blink - time.get_ticks()) > self.blink_interval:\n self.blink = True",
"def update(self):\n self.brain.update()",
"def get_brownian_ball_motion(start, nframes):\n # The Wiener process parameter.\n delta = 60\n # Total time.\n T = 10.0\n # Number of steps.\n N = nframes\n # Time step size\n dt = T/N\n # Initial values of x.\n x = numpy.empty((2,N+1))\n x[:, 0] = start\n\n brownian(x[:,0], N, dt, delta, out=x[:,1:])\n\n out = []\n for i in x.transpose():\n out.append(( int(i[0]), int(i[1]) ))\n return out",
"def bPlusbStar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx *= (1 / self.prob.gamma)\n running_total += 1 - abcxx - ayxx\n return running_total",
"def update_b(color, new_b):\n\n color.update_b(new_b)",
"def b_plus_bstar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += (self.alphas[j] + self.etas[j]) * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j],\n self.prob.X[i])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n abcxx *= (1 / self.prob.gamma)\n running_total += 1 - abcxx - ayxx\n return running_total",
"def FB(self):\n # The maximum update amount for these element\n StepLength_DELTA = self.dt * (self.StepLength_LIMITS[1] -\n self.StepLength_LIMITS[0]) / (6.0)\n StepVelocity_DELTA = self.dt * (self.StepVelocity_LIMITS[1] -\n self.StepVelocity_LIMITS[0]) / (2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n if self.StepLength < -self.StepLength_LIMITS[0] / 2.0:\n StepLength_DIRECTION = np.random.randint(-1, 3, 1)[0]\n elif self.StepLength > self.StepLength_LIMITS[1] / 2.0:\n StepLength_DIRECTION = np.random.randint(-2, 2, 1)[0]\n else:\n StepLength_DIRECTION = np.random.randint(-1, 2, 1)[0]\n StepVelocity_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.StepLength += StepLength_DIRECTION * StepLength_DELTA\n self.StepLength = np.clip(self.StepLength, self.StepLength_LIMITS[0],\n self.StepLength_LIMITS[1])\n self.StepVelocity += StepVelocity_DIRECTION * StepVelocity_DELTA\n self.StepVelocity = np.clip(self.StepVelocity,\n self.StepVelocity_LIMITS[0],\n self.StepVelocity_LIMITS[1])",
"def bias(self, value):\n self.mbmod.bias = value",
"def update_anim(frame, self):\n self.step()\n self.im.set_data(self.array)\n self.im2.set_data(self.array2)",
"def cb_update(val):\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n\n # update Dirichlet's parameters alpha\n dirichlet.set_param(alpha_update)\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n # MAP\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n draw_pdf_contours(axPosteriorDirichlet, posteriorDirichlet) # Draw Posterior Dirichlet\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar graph\n\n print('Update')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()",
"def do_animations(self):\n self.animate_bloop(700, 160, 50)",
"def update(self):\r\n self.y -= self.speed_factor\r\n # Update blast rect position\r\n self.rect.y = self.y",
"def step(self):\n # A = self.array\n # B = self.array2\n # ra, rb, f, k = self.params\n H = self.array\n L = self.array2\n birth_rate, death_rate, a, c = self.params\n \n # cA = correlate2d(A, self.kernel, **self.options)\n # cB = correlate2d(B, self.kernel, **self.options)\n cH = correlate2d(H, self.kernel,**self.options)\n cL = correlate2d(L, self.kernel, **self.options)\n\n # reaction = A * B**2\n\n # self.array += ra * cA - reaction + f * (1-A) \n # self.array2 += rb * cB + reaction - (f+k) * B\n self.array += birth_rate*cH - a*L*H\n self.array2 += c*cL*cH - death_rate*L",
"def _birdUpdateHandler(self, pin):\n\n # Update movement value from PIR pin status\n self.p.update(pin)\n\n if(self.p.movement == 1):\n #print(\"Motion detected\")\n self._distanceCheck()\n\n timeO = 0\n while(self.birdHere == 0 and self.p.movement == 1 and timeO < self.timeout):\n sleep(1)\n self._distanceCheck()\n timeO += 1\n\n else:\n #print(\"Motion ended\")\n self.birdHere = 0",
"def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255",
"def update_chains(self):\r\n _, black_positions, white_positions = self.get_positions()\r\n\r\n self.bfs(black_positions, 1)\r\n self.bfs(white_positions, 2)",
"def update_state(self, a, obs, t):\n \n self.update_weights(a, obs, t) # only update weights, not particles \n self.update_running_average_weights(t) \n return None",
"def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model",
"def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")",
"def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.counts[k]) / (self.beta_0 + self.counts[k])\n tmp2 = (self.means[k] - self.m_0)\n tmp = np.linalg.inv(self.W_0) + self.counts[k] * self.covars[k] + tmp * tmp2 @ tmp2.T\n self.w_k[k] = np.linalg.inv(tmp)\n self.nu_k[k] = self.nu_0 + self.counts[k]\n self.alpha_k[k] = self.alpha_0[k] + self.counts[k]",
"def updateBeta(self):\n\n priorBeta = np.copy(self.beta) # 返り値で更新幅を与えるので初期値を保持しておく\n W = self.__genW() # diag Matrix\n # update beta : Fisher Scoring Update\n result = np.matmul(np.matmul(self.X.T, W), self.X)\n result = np.matmul(np.linalg.inv(result), self.X.T)\n result = np.matmul(result, W)\n # claimFreq=0の人は, firstIterationでmu=0の0割が必ず発生する. 適切な対処法は+epsilonで良い?\n z = (self.Y - self.mu)/(self.mu + DoubleGLM.EPSILON) + np.log(self.mu + DoubleGLM.EPSILON)\n self.beta = np.matmul(result, z)\n\n # update current mu\n self.mu = np.exp(np.matmul(self.X, self.beta))\n # update current deviance\n d1 = self.Y * (self.Y**(1-self.p) - self.mu**(1-self.p)) / (1-self.p)\n d2 = (self.Y**(2-self.p) - self.mu**(2-self.p)) / (2-self.p)\n self.d = 2*self.w * (d1 - d2)\n\n return np.abs(priorBeta - self.beta)",
"def _dsb_updater(self):\n self.dsb_sensor = W1ThermSensor()\n self.humi = '---'\n self.humi_change = 0\n while True:\n # Set previous values\n self.temp_previous = self.temp\n self.humi_previous = self.humi\n self.timelast = self.measurement_time\n\n self.temp = self.dsb_sensor.get_temperature()\n # Calculate change values\n self.temp_change = self.temp - self.temp_previous\n\n # Flag change\n if self.temp_change != 0:\n self.has_changed = True\n else:\n self.has_changed = False\n\n time.sleep(2)",
"def body_bwd(timestep, viterbi, last_decision):\n backpointers_timestep = batch_gather_3d(\n backpointers_out, tf.maximum(sequence_lengths - timestep, 0))\n new_last_decision = batch_gather_2d(\n backpointers_timestep, last_decision)\n new_viterbi = viterbi.write(timestep, new_last_decision)\n return timestep + 1, new_viterbi, new_last_decision"
] |
[
"0.60446244",
"0.60419303",
"0.6027442",
"0.5988064",
"0.5986942",
"0.59794277",
"0.59581554",
"0.59413654",
"0.5925549",
"0.591917",
"0.5880924",
"0.58793706",
"0.5871149",
"0.5859573",
"0.5823392",
"0.5822276",
"0.5814989",
"0.580665",
"0.5782914",
"0.57619745",
"0.57534593",
"0.5736613",
"0.5728886",
"0.5714224",
"0.57008654",
"0.5680686",
"0.5644205",
"0.561032",
"0.5609144",
"0.55790013"
] |
0.62997043
|
0
|
Setup the events defined in the settings.
|
def _setup_events(conf):
events = {}
for name in conf.keys():
events[name] = Event(name=name)
for listener in conf[name]:
action = 'run'
if ':' in listener:
listener, action = listener.rsplit(':')
events[name].add_listener(listener, action)
# Add events to module scope.
globals().update(events)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def InitOtherEvents(self):\n\n pass",
"def setUp(self):\n event_bus._event_bus = event_bus._EventBus()",
"def init_events():\n db = get_db()\n\n from emapp.db import init_db\n init_db()\n for event in current_app.config['PREDEFINED_EVENTS']:\n db.execute(('INSERT INTO event '\n '(name, location, start_time, end_time)'\n 'VALUES (?,?,?,?)'),\n (event['name'], event['location'],\n event['start_time'], event['end_time']))\n db.commit()",
"def handleEvents(self, events):\n pass",
"def events(self, events):\n\n self._events = events",
"def events(self, events: object):\n\n self._events = events",
"def _setup_events(self):\n # Bind all events from our buttons (including 'exit')\n self.Bind(wx.EVT_BUTTON, self._process_event)",
"def set_capture_events_from_config(self):\n\n event_config = [\n {\n \"config_key\": \"events_watchlist\",\n \"events\": [\n \"watchlist.hit.process\",\n \"watchlist.hit.binary\",\n \"watchlist.storage.hit.process\",\n \"watchlist.storage.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"wlhitnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_feed\",\n \"events\": [\n \"feed.ingress.hit.process\",\n \"feed.ingress.hit.binary\",\n \"feed.ingress.hit.host\",\n \"feed.storage.hit.process\",\n \"feed.storage.hit.binary\",\n \"feed.query.hit.process\",\n \"feed.query.hit.binary\"\n ],\n \"options\": self.forwarder_options.get(\"feedhitnotif\", \"0\")\n },\n {\n \"config_key\": \"events_alert\",\n \"events\": [\n \"alert.watchlist.hit.ingress.process\",\n \"alert.watchlist.hit.ingress.binary\",\n \"alert.watchlist.hit.ingress.host\",\n \"alert.watchlist.hit.query.process\",\n \"alert.watchlist.hit.query.binary\"\n ],\n \"options\": self.forwarder_options.get(\"alertnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_raw_sensor\",\n \"events\": [\n \"ingress.event.process\",\n \"ingress.event.procstart\",\n \"ingress.event.netconn\",\n \"ingress.event.procend\",\n \"ingress.event.childproc\",\n \"ingress.event.moduleload\",\n \"ingress.event.module\",\n \"ingress.event.filemod\",\n \"ingress.event.regmod\"\n \t\"ingress.event.tamper\",\n \t\t\"ingress.event.crossprocopen\",\n \t\t\"ingress.event.remotethread\",\n \t\t\"ingress.event.processblock\",\n \t\t\"ingress.event.emetmitigation\",\n ],\n \"options\": self.forwarder_options.get(\"rawsensnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_observed\",\n \"events\": [\"binaryinfo.host.observed\",\n \"binaryinfo.observed,\"\n \"binaryinfo.group.observed\"],\n\n \"options\": self.forwarder_options.get(\"binobsnotifenabled\", \"0\")\n },\n {\n \"config_key\": \"events_binary_upload\",\n \"events\": [\"binarystore.file.added\"],\n \"options\": self.forwarder_options.get(\"binuplnotifenabled\", \"0\")\n }\n ]\n\n self.capture_events = []\n for event_type in event_config:\n events = self.forwarder_options.get(event_type[\"config_key\"], \"0\").lower()\n if events == \"all\":\n self.capture_events.extend(event_type[\"events\"])\n elif events != \"0\":\n events_from_config = events.split(\",\")\n events_to_capture = list(set(events_from_config) & set(event_type[\"events\"]))\n self.capture_events.extend(events_to_capture)\n\n self.logger.info(\"Configured to capture events: %s\" % self.capture_events)",
"def init_events_command():\n init_events()\n click.echo('Initialized the predefined events.')",
"def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def setUpClass(cls):\n super().setUpClass()\n cls.start_events_isolation()",
"def __init__(self, events):\n self.events = events",
"def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.description = \"Write world class code\"\n # specify owner of a event\n self.event = Event(description=self.description, owner=user)",
"def setup(self) -> List[Event]:\n if self._ran_setup and self.WARN_EXTRA_SETUPS:\n raise RuntimeWarning(\"Setup was already called, doing it again will create duplicate events.\")\n events = []\n\n for event in self.db.iter_events():\n self.schedule(event)\n events.append(event)\n self._ran_setup = True\n return events",
"def setup_event(site):\n folder = site['institucional']['eventos']\n event = folder['1o-ano-do-site']\n acc = IEventAccessor(event)\n future = datetime.now() + relativedelta(years=1)\n year = future.year\n month = future.month\n day = future.day\n acc.start = datetime(year, month, day, 0, 0, 0)\n acc.end = datetime(year, month, day, 23, 59, 59)\n notify(ObjectModifiedEvent(event))\n event.reindexObject()\n logger.debug(u'Evento padrao configurado')",
"def create_default_events(self):\n self.events.register_class(\"commands\", LineEvent)\n self.events.register_class(\"commands_out\", LineEvent)\n self.events.register_class(\"hooks\", HookEvent)",
"def setUp(self):\n\n TrhphTestCase.setUp(self)\n\n def evt_callback(event):\n log.info(\"CALLBACK: %s\" % str(event))\n\n # needed by DriverTestMixin\n self.driver = TrhphInstrumentDriver(evt_callback)\n self.comms_config = {\n 'addr': self.device_address,\n 'port': self.device_port}",
"def on_starting(self):\n\n self.set_capture_events_from_config()",
"def __init__(self, *args, **kwargs):\n self.events = {}",
"def setup_hooks(self):\n pass",
"def setup(self):\n self.ae = None",
"def setup_signals(self):\n\n # Hook the play/pause buttons\n self.ui.actionContinue_Live_Updates.triggered.connect(self.on_live)\n self.ui.actionPause_Live_Updates.triggered.connect(self.on_pause)\n self.ui.actionSave.triggered.connect(self.on_save)\n\n # Custom graph buttons\n self.action_graph_reset.triggered.connect(self.reset_graph)\n self.action_graph_full_extent.triggered.connect(self.full_extent)\n\n # Custom tools generated in visualize that are not actions\n self.zoom_tool.wrap_sig.clicked.connect(self.process_zoom)\n self.select_tool.wrap_sig.clicked.connect(self.process_select)"
] |
[
"0.7145879",
"0.6874534",
"0.6732326",
"0.6664637",
"0.6661688",
"0.66563714",
"0.6647324",
"0.65423274",
"0.6504739",
"0.6321489",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6295267",
"0.6269704",
"0.6258585",
"0.6254712",
"0.6245821",
"0.6227551",
"0.62267023",
"0.6211887",
"0.6160544",
"0.61435133",
"0.61012477",
"0.6069648"
] |
0.71199274
|
1
|
Download a remote dataset into path Fetch a dataset pointed by remote's url, save into path using remote's filename and ensure its integrity based on the MD5 Checksum of the downloaded file. Adapted from scikitlearn's sklearn.datasets.base._fetch_remote.
|
def download_from_remote(remote, save_dir, force_overwrite=False):
if remote.destination_dir is None:
download_dir = save_dir
else:
download_dir = os.path.join(save_dir, remote.destination_dir)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
download_path = os.path.join(download_dir, remote.filename)
if not os.path.exists(download_path) or force_overwrite:
# If file doesn't exist or we want to overwrite, download it
with DownloadProgressBar(
unit='B', unit_scale=True, unit_divisor=1024, miniters=1
) as t:
try:
urllib.request.urlretrieve(
remote.url,
filename=download_path,
reporthook=t.update_to,
data=None,
)
except Exception as e:
error_msg = """
mirdata failed to download the dataset!
Please try again in a few minutes.
If this error persists, please raise an issue at
https://github.com/mir-dataset-loaders/mirdata,
and tag it with 'broken-link'.
"""
print(error_msg)
raise e
checksum = md5(download_path)
if remote.checksum != checksum:
raise IOError(
'{} has an MD5 checksum ({}) '
'differing from expected ({}), '
'file may be corrupted.'.format(download_path, checksum, remote.checksum)
)
return download_path
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def download_dataset_from_url(dataset_url_md5, name, to_path):\n # Prevent concurrent FileExistsError\n try:\n if not os.path.exists(to_path):\n os.mkdir(to_path)\n except Exception:\n pass\n\n dataset_url = dataset_url_md5[\"url\"]\n dataset_md5 = dataset_url_md5[\"md5\"]\n\n dataset_filepath = os.path.join(to_path, name)\n\n if os.path.exists(dataset_filepath):\n local_file_md5 = get_file_md5(dataset_filepath)\n if local_file_md5 == dataset_md5:\n return dataset_filepath\n else:\n print(f\"Local dataset {name} is broken, ready to re-download.\")\n\n print(f'Downloading dataset: {dataset_url} to {dataset_filepath}')\n urllib.request.urlretrieve(dataset_url, dataset_filepath)\n\n if not os.path.exists(dataset_filepath):\n raise IOError(f\"Failed to download dataset from {dataset_url}\")\n return dataset_filepath",
"def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")",
"def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url = DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()",
"def download(data_root, version):\n if version not in GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys():\n raise ValueError(\n f\"A valid dataset version is required. Available versions are:\"\n f\"{GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys()}\"\n )\n dest_path = os.path.join(\n data_root, GroceriesReal.LOCAL_PATH, f\"{version}.zip\"\n )\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n extract_folder = os.path.join(data_root, GroceriesReal.LOCAL_PATH)\n if os.path.exists(dest_path):\n logger.info(\"The dataset file exists. Skip download.\")\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError:\n logger.info(\n \"The checksum of the previous dataset mismatches. \"\n \"Delete the previously downloaded dataset.\"\n )\n os.remove(dest_path)\n if not os.path.exists(dest_path):\n source_uri = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].source_uri\n GroceriesReal._download_http(source_uri, dest_path, version)\n GroceriesReal._extract_file(dest_path, extract_folder)",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )",
"def download_remote_data_file(data_url: str) -> str:\r\n # Create a data directory if it doesn't exist.\r\n data_dir_path = _find_or_create_dir(DATA_FOLDER)\r\n \r\n # Download the data file if it doesn't exist.\r\n filename = os.path.basename(urlparse(data_url).path)\r\n data_file_path = os.path.join(data_dir_path, filename)\r\n if not os.path.exists(data_file_path):\r\n print(f'Downloading data file {data_file_path}...')\r\n with urlopen(data_url) as response:\r\n with open(data_file_path, \"wb\") as data_file:\r\n shutil.copyfileobj(response, data_file)\r\n print('Done downloading data file.')\r\n\r\n return data_file_path",
"def _download(self, url, output_dir, dataset, chunk_size=1024):\n r = self.session.get(url, stream=True, allow_redirects=True)\n if not r.ok:\n r = self.session.get(r.url, stream=True, allow_redirects=True, auth=(self._username, self._password))\n file_size = int(r.headers['Content-Length'])\n\n with tqdm(total=file_size, unit_scale=True, unit='B', unit_divisor=1024) as pbar:\n ### GET FILE NAME ###\n if \"Content-Disposition\" in r.headers.keys():\n local_filename = re.findall(\"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n local_filename = url.split(\"/\")[-3]\n local_filename = self.api.lookup(dataset, local_filename)[0]\n local_filename = local_filename + util.convert_to_extension(r.headers['content-type'])\n print(\"*** FNAME\", local_filename)\n\n local_filename = os.path.join(output_dir, local_filename)\n\n ### WRITE FILE ###\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n pbar.update(chunk_size)\n return local_filename",
"def _download_http(source_uri, dest_path, version):\n\n try:\n logger.info(\"Downloading the dataset.\")\n download_file(source_uri=source_uri, dest_path=dest_path)\n except DownloadError as e:\n logger.info(\n f\"The request download from {source_uri} -> {dest_path} can't \"\n f\"be completed.\"\n )\n raise e\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError as e:\n logger.info(\"Checksum mismatch. Delete the downloaded files.\")\n os.remove(dest_path)\n raise e",
"def _download_mnist_realval(dataset):\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print 'Downloading data from %s' % origin\n urllib.urlretrieve(origin, dataset)",
"def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def download_data(remote_path: str, tmpdir: str) -> str:\n tar_path = os.path.join(tmpdir, \"data.tar.gz\")\n print(f\"downloading dataset from {remote_path} to {tar_path}...\")\n fs, _, rpaths = fsspec.get_fs_token_paths(remote_path)\n assert len(rpaths) == 1, \"must have single path\"\n fs.get(rpaths[0], tar_path)\n\n data_path = os.path.join(tmpdir, \"data\")\n print(f\"extracting {tar_path} to {data_path}...\")\n with tarfile.open(tar_path, mode=\"r\") as f:\n f.extractall(data_path)\n\n return data_path",
"def data_path(self, local_path: str, dataset_name: str = None, version: str = \"latest\"):\n if local_path.startswith('http'):\n root = Path('./datasets')\n root.mkdir(parents=True, exist_ok=True) # create root\n filename = root / Path(local_path).name\n print(f'Downloading {local_path} to {filename}')\n torch.hub.download_url_to_file(local_path, filename)\n local_path = str(filename)\n if local_path.endswith('.zip'): # unzip\n save_path = root / Path(filename.name[:-len('.zip')])\n print(f'Unziping {filename} to {save_path}')\n import zipfile\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(save_path)\n local_path = str(save_path)\n return local_path\n\n if Path(local_path).exists():\n # TODO: check whether local_path contains the right version\n return local_path\n\n elif not Path(local_path).exists():\n if self.use_wandb:\n data_path, _ = self.download_dataset_artifact(dataset_name, version)\n return data_path\n\n else:\n raise Exception('Dataset not found.')",
"def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))",
"def downloadDataset(datasetName, url):\n\n baseFolder = os.path.dirname(os.path.abspath(__file__))\n destinationFolder = os.path.join(baseFolder, \"DataSets\", datasetName)\n testFolder = os.path.join(destinationFolder, \"test\")\n trainFolder = os.path.join(destinationFolder, \"train\")\n\n if not os.path.exists(os.path.join(destinationFolder, \"test\")):\n filename = os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS.zip\")\n if not os.path.exists(filename):\n print(\"Downloading data from \" + url + \"...\")\n urlretrieve(url, filename)\n\n try:\n print(\"Extracting \" + filename + \"...\")\n with zipfile.ZipFile(filename) as myzip:\n myzip.extractall(destinationFolder)\n print(\"Distributing the Dataset...\")\n distributeDataset(destinationFolder, testFolder, trainFolder)\n print(\"Renaming the files...\")\n renameFiles(testFolder)\n renameFiles(trainFolder)\n finally:\n os.remove(filename)\n print(\"Done.\")\n else:\n print(\"Data already available at \" + baseFolder + \"/\" + datasetName)",
"def download_dataset(urls, path):\n\n # check if the path exist or not\n os.makedirs(os.path.normpath(path), exist_ok=True)\n\n # Download the dataset\n for key in urls:\n _L(\"Downloading \" + _P(urls[key]) + \" in \" + _S(path))\n # if (urls[key].split('.')[-1] != 'tar'):\n os.system(\"wget {} -P {}\".format(urls[key], path))",
"def download(name, cache_dir=os.path.join('..', 'data')): #@save\n assert name in DATA_HUB, f\"{name} does not exist in {DATA_HUB}.\"\n url, sha1_hash = DATA_HUB[name]\n d2l.mkdir_if_not_exist(cache_dir)\n fname = os.path.join(cache_dir, url.split('/')[-1])\n if os.path.exists(fname):\n sha1 = hashlib.sha1()\n with open(fname, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n if sha1.hexdigest() == sha1_hash:\n return fname # Hit cache\n print(f'Downloading {fname} from {url}...')\n r = requests.get(url, stream=True, verify=True)\n with open(fname, 'wb') as f:\n f.write(r.content)\n return fname",
"def get_file(fname,\n origin,\n untar=False,\n md5_hash=None,\n file_hash=None,\n cache_subdir='datasets',\n hash_algorithm='auto',\n extract=False,\n archive_format='auto',\n cache_dir=None):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser('~'), '.keras')\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.keras')\n datadir = os.path.join(datadir_base, cache_subdir)\n _makedirs_exist_ok(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but {} file hash not match original value of {}.'.format(\n hash_algorithm, file_hash))\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise e\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download_file(self, remote_file):\n remote_file.download()",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)",
"def get_file(origin,\n fname=None,\n# untar=False,\n md5_hash=None,\n file_hash=None,\n cache_dir=None,\n cache_subdir=None,\n hash_algorithm='auto',\n extract=False,\n archive_format='auto'):\n\n if fname is None:\n fname = os.path.basename(origin)\n if fname == '':\n raise Exception(\"Please specify fname\")\n\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n\n if cache_dir is None:\n cache_dir = '/tmp/datasets'\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n if not os.access(cache_dir, os.W_OK):\n raise Exception(\"Can't write to {}\".format(cache_dir))\n if cache_subdir is not None:\n datadir = os.path.join(cache_dir, cache_subdir)\n else:\n datadir = cache_dir\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated because the ' + hash_algorithm +\n ' file hash does not match the original value of ' +\n file_hash + ' so we will re-download the data.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size is -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def download(name, cache_dir=os.path.join('..', 'data')):\n assert name in DATA_HUB, f\"{name} does not exist in {DATA_HUB}.\"\n url, sha1_hash = DATA_HUB[name]\n os.makedirs(cache_dir, exist_ok=True)\n fname = os.path.join(cache_dir, url.split('/')[-1])\n if os.path.exists(fname):\n sha1 = hashlib.sha1()\n with open(fname, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n if sha1.hexdigest() == sha1_hash:\n return fname # Hit cache\n print(f'Downloading {fname} from {url}...')\n r = requests.get(url, stream=True, verify=True)\n with open(fname, 'wb') as f:\n f.write(r.content)\n return fname",
"def get_file(fname,\n origin,\n untar=False,\n md5_hash=None,\n file_hash=None,\n cache_subdir='datasets',\n hash_algorithm='auto',\n extract=False,\n archive_format='auto',\n cache_dir=None):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser('~'), '.keras')\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.keras')\n datadir = os.path.join(datadir_base, cache_subdir)\n _makedirs_exist_ok(datadir)\n\n fname = path_to_string(fname)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated because the ' + hash_algorithm +\n ' file hash does not match the original value of ' + file_hash +\n ' so we will re-download the data.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except urllib.error.HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except urllib.error.URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def download_dataset(self, dataset_dir, dataset_url):\n if osp.exists(dataset_dir):\n return\n\n if dataset_url is None:\n raise RuntimeError(\n '{} dataset needs to be manually '\n 'prepared, please follow the '\n 'document to prepare this dataset'.format(\n self.__class__.__name__\n )\n )\n\n print('Creating directory \"{}\"'.format(dataset_dir))\n mkdir_if_missing(dataset_dir)\n fpath = osp.join(dataset_dir, osp.basename(dataset_url))\n\n print(\n 'Downloading {} dataset to \"{}\"'.format(\n self.__class__.__name__, dataset_dir\n )\n )\n download_url(dataset_url, fpath)\n\n print('Extracting \"{}\"'.format(fpath))\n try:\n tar = tarfile.open(fpath)\n tar.extractall(path=dataset_dir)\n tar.close()\n except:\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(dataset_dir)\n zip_ref.close()\n\n print('{} dataset is ready'.format(self.__class__.__name__))",
"def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file",
"def get_file(\n fname,\n origin,\n untar=False,\n cache_subdir=\"datasets\",\n extract=False,\n archive_format=\"auto\",\n cache_dir=None,\n):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".keras\")\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join(\"/tmp\", \".keras\")\n datadir = os.path.join(datadir_base, cache_subdir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + \".tar.gz\"\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n download = False\n else:\n download = True\n\n if download:\n print(\"Downloading data from\", origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = \"URL fetch failure on {}: {} -- {}\"\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format=\"tar\")\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath"
] |
[
"0.7259059",
"0.68588865",
"0.6651223",
"0.66476214",
"0.652797",
"0.65078294",
"0.64733714",
"0.6462734",
"0.6437489",
"0.6434678",
"0.64107436",
"0.63951904",
"0.6351276",
"0.63472974",
"0.6347012",
"0.63452065",
"0.62995946",
"0.6272911",
"0.62319505",
"0.61726815",
"0.6159516",
"0.6154234",
"0.61392546",
"0.6132388",
"0.61319876",
"0.61057985",
"0.6100721",
"0.6080256",
"0.6075056",
"0.6072739"
] |
0.7242501
|
1
|
Unzip a zip file inside its current directory.
|
def unzip(zip_path, cleanup=False):
zfile = zipfile.ZipFile(zip_path, 'r')
zfile.extractall(os.path.dirname(zip_path))
zfile.close()
if cleanup:
os.remove(zip_path)
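
# A minimal usage sketch, assuming a hypothetical archive at 'data/archive.zip';
# its contents are extracted into 'data/' and the zip itself is then removed
# because cleanup=True.
import os
import zipfile

if __name__ == '__main__':
    unzip('data/archive.zip', cleanup=True)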
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination",
"def unzip_file(data_zip, path_unzip):\r\n with zipfile.ZipFile(data_zip, \"r\") as zip_temp:\r\n zip_temp.extractall(path_unzip)",
"def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return",
"def unzip(zip_path, output_file, data_folder):\n\n print('Unzipping file: {}'.format(zip_path))\n pyunpack.Archive(zip_path).extractall(data_folder)\n\n # Checks if unzip was successful\n if not os.path.exists(output_file):\n raise ValueError(\n 'Error in unzipping process! {} not found.'.format(output_file))",
"def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()",
"def unzip(f, targetdir):\n import zipfile\n\n with zipfile.ZipFile(f, \"r\") as zip_ref:\n zip_ref.extractall(targetdir)",
"def unzip(zipped_file):\n with gzip.open(zipped_file, 'rt', encoding='ISO-8859-1') as file:\n file = file.read()\n return file",
"def unzip_file(zipfile_path, target_dir, touchfile_path):\r\n with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:\r\n zip_ref.extractall(target_dir)\r\n\r\n with open(touchfile_path, 'w') as touchfile:\r\n touchfile.write(f'unzipped {zipfile_path}')",
"def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error",
"def unzip(input_filename, extract_dir):\n if not zipfile.is_zipfile(input_filename):\n raise ValueError(\"%s is not a zip file\" % (input_filename))\n zip_ds = zipfile.ZipFile(input_filename)\n zip_ds.extractall(path=extract_dir)\n zip_ds.close()",
"def unzip(path):\n zip_ref = zipfile.ZipFile(path, 'r')\n new_path = path[:-3]\n zip_ref.extractall(new_path)\n zip_ref.close()\n return new_path",
"def unzip_file(zip_path, directory_to_extract_to):\n ensure_dir(directory_to_extract_to)\n with zipfile.ZipFile(file=zip_path) as zip_file:\n # Loop over each file\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n try:\n zip_file.extract(member=file, path=directory_to_extract_to)\n except BadZipFile as e:\n print(e)",
"def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:\n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(dir_to_extract_to)\n return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'",
"def _unzip(filename, branch=None):\n try:\n file = zipfile.ZipFile(filename)\n basename = os.path.dirname(filename)\n basename = basename.replace(\".zip\", \"\")\n file.extractall(path=basename)\n return basename, filename\n except Exception as e:\n six.print_(e)",
"def unzip(path, filename_as_folder=False):\n for filename in os.listdir(path):\n if filename.endswith(\".zip\"):\n name = os.path.splitext(os.path.basename(filename))[0]\n if not os.path.isdir(name):\n try:\n file = os.path.join(path, filename)\n zip = ZipFile(file)\n if filename_as_folder:\n directory = os.path.join(path, name)\n os.mkdir(directory)\n print(\"Unzipping {} to {}\".format(filename, directory))\n zip.extractall(directory)\n else:\n print(\"Unzipping {} to {}\".format(filename, path))\n zip.extractall(path)\n except BadZipfile:\n print(\"BAD ZIP: \" + filename)\n try:\n os.remove(file)\n except OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occured",
"def file_unzipper(directory):\n debug.log(\"Unzipping directory (%s)...\"%directory)\n #FINDING AND UNZIPPING ZIPPED FILES\n for root, dirs, files in os.walk(directory, topdown=False):\n if root != \"\":\n orig_dir = os.getcwd()\n os.chdir(directory)\n Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()\n Popen('unzip -qq -o \"*.zip\" > /dev/null 2>&1', shell=True).wait()\n Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()\n os.chdir(orig_dir)",
"def unzip_data(zip_f,data_folder_path): \n\n with zipfile.ZipFile(zip_f,\"r\") as zip_ref:\n zip_ref.extractall(data_folder_path)",
"def extract_and_clean(zipper, zip_path, filename):\n zipper.extract(zip_path)\n if \"/\" in zip_path :\n os.rename(zip_path, filename)\n shutil.rmtree(zip_path.split('/')[0])",
"def unzip(zfile, md=False):\n\tbasedir = ''\n\tcount = -1\n\tif md:\n\t\tbasedir = prepareBaseDir(zfile)\n\t\n\tzfile = zipfile.ZipFile(zfile, 'r')\n\tfor name in zfile.namelist():\n\t\tcount+=1\n\t\tuname = name.decode('gbk')\n\t\tif uname.endswith('.DS_Store'):\n\t\t\tcontinue\n\t\t\n\t\t#prepare directory\n\t\tdirs = os.path.dirname(uname)\n\t\tif basedir:\n\t\t\tdirs = os.path.join(basedir, dirs)\n\t\tprint 'Extracting: ' + uname\n\t\tif dirs and not os.path.exists(dirs):\n\t\t\tprint 'Prepare directories: ', dirs\n\t\t\tos.makedirs(dirs)\n\t\tif (count == 0):\n\t\t\thomeDir = uname[:-1]\n\t\t#ready to unzip file\n\t\tdata = zfile.read(name)\n\t\tif basedir:\n\t\t\tuname = os.path.join(basedir, uname)\n\t\tif not os.path.exists(uname):\n\t\t\tfo = open(uname, 'w')\n\t\t\tfo.write(data)\n\t\t\tfo.close()\n\tzfile.close()\n\treturn homeDir",
"def unzip_nested_zip(dataset_zip, path_unzip):\r\n\r\n with zipfile.ZipFile(dataset_zip, \"r\") as zfile:\r\n try:\r\n zfile.extractall(path=path_unzip)\r\n except OSError as e:\r\n logging.warning(\r\n \"Please check the unzipped files manually. There may be some missed important files.\"\r\n )\r\n logging.warning(\"The directory is: \" + path_unzip)\r\n for root, dirs, files in os.walk(path_unzip):\r\n for filename in files:\r\n if re.search(r\"\\.zip$\", filename):\r\n file_spec = os.path.join(root, filename)\r\n new_dir = os.path.join(root, filename[0:-4])\r\n unzip_nested_zip(file_spec, new_dir)",
"def unzip(zipped_file, output_directory=None,\n prefix=\"apsharvest_unzip_\", suffix=\"\"):\n if not output_directory:\n # We create a temporary directory to extract our stuff in\n try:\n output_directory = mkdtemp(suffix=suffix,\n prefix=prefix,\n dir=os.path.join(CFG_TMPSHAREDDIR, 'apsharvest'))\n except Exception, e:\n try:\n os.removedirs(output_directory)\n except TypeError:\n pass\n raise e\n return _do_unzip(zipped_file, output_directory)",
"def unzipArchives(zip_file, password):\n with ZipFile(zip_file) as archive:\n archive.extractall(pwd=bytes(password, \"utf8\"))",
"def unzip(filepath, cleanup=False):\n\t(uzfile, ext) = os.path.splitext(filepath)\n\tif ext != '.gz':\n\t\treturn filepath\n\tif os.path.exists(uzfile):\n\t\treturn uzfile\n\t\n\twith gzip.open(filepath, 'rb') as f_in:\n\t\twith open(uzfile, 'w') as f_out:\n\t\t\tfor line in f_in:\n\t\t\t\tf_out.write(line.decode())\n\t\n\tif cleanup and file_exists(uzfile):\n\t\tos.remove(filepath)\n\treturn uzfile",
"def unzip_file(self, filename, location, flatten=True):\n if not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp)\n leading = has_leading_dir(zip.namelist()) and flatten\n for name in zip.namelist():\n data = zip.read(name)\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not os.path.exists(dir):\n os.makedirs(dir)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n finally:\n zipfp.close()",
"def Unzip(fullPath,installDirectory, quietMode):\n with zipfile.ZipFile(fullPath, 'r') as zip:\n # TODO - check zipfile contents for file number;\n # should always be 1 binary file unless Hashicorp jumps the shark on the build\n extractedFile = zip.namelist()[0]\n if not quietMode:\n print(\"[-] - Extracting (unzip) -> [{0}] ...\".format(extractedFile))\n zip.extractall(installDirectory)\n return extractedFile",
"def unzip(local_zip: str, extract_dir: str, pwd: str = None):\n def get_zipinfo_datetime(zipmember: zipfile.ZipInfo) -> datetime:\n zt = zipmember.date_time # tuple: year, month, day, hour, min, sec\n # ZIP uses localtime\n return datetime(zt[0], zt[1], zt[2], zt[3], zt[4], zt[5], tzinfo=tz.tzlocal())\n\n def has_file_changed(zipmember: zipfile.ZipInfo, dst_path):\n st: os.stat_result = None\n try:\n st = os.stat(dst_path, follow_symlinks=False)\n if st.st_size != zipmember.file_size:\n return True\n dst_mtime: datetime = datetime.fromtimestamp(st.st_mtime, tz=tz.tzlocal())\n src_mtime = get_zipinfo_datetime(zipmember)\n if dst_mtime != src_mtime:\n return True\n except (OSError, ValueError):\n return True # does not exist\n return False\n\n def make_symlink(zipmember: zipfile.ZipInfo, symlink_location, is_directory):\n target = zip.read(zipmember, pwd=pwd).decode('utf-8')\n if os.path.lexists(symlink_location):\n os.remove(symlink_location)\n os.symlink(target, symlink_location, target_is_directory=is_directory)\n\n unzipped_files: List[Tuple[zipfile.ZipFile, str]] = []\n\n with zipfile.ZipFile(local_zip, \"r\") as zip:\n for zipmember in zip.infolist():\n dst_path = os.path.normpath(os.path.join(extract_dir, zipmember.filename))\n mode = zipmember.external_attr >> 16\n is_symlink = stat.S_ISLNK(mode)\n #what = 'DIR' if zipmember.is_dir() else 'FILE'\n #what = what + ' LINK' if is_symlink else what\n #print(f'{what} {zipmember.filename} S_IMODE={stat.S_IMODE(mode):0o} S_IFMT={stat.S_IFMT(mode):0o}')\n if zipmember.is_dir(): # make dirs if needed\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=True)\n else:\n os.makedirs(dst_path, exist_ok=True)\n elif has_file_changed(zipmember, dst_path): # only extract if file appears to be modified\n unzipped_files.append((zipmember, dst_path))\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=False)\n else:\n with zip.open(zipmember, pwd=pwd) as src, open(dst_path, \"wb\") as dst:\n shutil.copyfileobj(src, dst)\n for zipmember, dst_path in unzipped_files:\n # set the correct permissions for files and folders\n perm = stat.S_IMODE(zipmember.external_attr >> 16)\n os.chmod(dst_path, perm)\n # always set the modification date from the zipmember timestamp,\n # this way we can avoid unnecessarily modifying files and causing full rebuilds\n time = get_zipinfo_datetime(zipmember)\n #print(f' | {dst_path} {time}')\n mtime = time.timestamp()\n if System.windows:\n os.utime(dst_path, times=(mtime, mtime))\n else:\n os.utime(dst_path, times=(mtime, mtime), follow_symlinks=False)\n\n return len(unzipped_files)",
"def extract_zip(zip_path, target_folder):\n with zipfile.ZipFile(zip_path) as archive:\n archive.extractall(target_folder)",
"def unzip(source_archive_path, target_path):\n assert zipfile.is_zipfile(source_archive_path), 'Not a valid ZIP archive'\n print('Decompressing archive {} into {}'.format(source_archive_path, target_path))\n with zipfile.ZipFile(source_archive_path) as zf:\n zf.extractall(target_path)\n print('Done')",
"def uncompress(path, dest=None, remove=True):\n # assert path.endswith('.zip')\n prefix, ext = split_if_compressed(path)\n folder_name = os.path.basename(prefix)\n file_is_zip = ext == '.zip'\n root_of_folder = None\n if ext == '.gz':\n try:\n with gzip.open(path, 'rb') as f_in, open(prefix, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n remove_file(prefix)\n remove_file(path)\n raise e\n else:\n try:\n with zipfile.ZipFile(path, \"r\") if ext == '.zip' else tarfile.open(path, 'r:*') as archive:\n if not dest:\n namelist = sorted(archive.namelist() if file_is_zip else archive.getnames())\n if namelist[0] == '.':\n namelist = namelist[1:]\n namelist = [p[len('./'):] if p.startswith('./') else p for p in namelist]\n if ext == '.tgz':\n roots = set(x.split('/')[0] for x in namelist)\n if len(roots) == 1:\n root_of_folder = next(iter(roots))\n else:\n # only one file, root_of_folder = ''\n root_of_folder = namelist[0].strip('/') if len(namelist) > 1 else ''\n if all(f.split('/')[0] == root_of_folder for f in namelist[1:]) or not root_of_folder:\n dest = os.path.dirname(path) # only one folder, unzip to the same dir\n else:\n root_of_folder = None\n dest = prefix # assume zip contains more than one file or folder\n print('Extracting {} to {}'.format(path, dest))\n archive.extractall(dest)\n if root_of_folder:\n if root_of_folder != folder_name:\n # move root to match folder name\n os.rename(path_join(dest, root_of_folder), path_join(dest, folder_name))\n dest = path_join(dest, folder_name)\n elif len(namelist) == 1:\n dest = path_join(dest, namelist[0])\n except Exception as e:\n remove_file(path)\n if os.path.exists(prefix):\n if os.path.isfile(prefix):\n os.remove(prefix)\n elif os.path.isdir(prefix):\n shutil.rmtree(prefix)\n raise e\n if remove:\n remove_file(path)\n return dest",
"def unpack(input_filename, extract_dir):\n if not is_archive_file(input_filename):\n raise AttributeError(\"Input_filename must be an archive (ex: .tar.gz, .zip)\")\n if zipfile.is_zipfile(input_filename):\n unzip(input_filename, extract_dir)\n else:\n untar(input_filename, extract_dir)"
] |
[
"0.7677119",
"0.74962723",
"0.7415738",
"0.7398855",
"0.73234475",
"0.7294942",
"0.72689474",
"0.7235865",
"0.7220886",
"0.7124803",
"0.7121888",
"0.70193404",
"0.69754934",
"0.6958987",
"0.68196464",
"0.68058217",
"0.6780341",
"0.6754517",
"0.67413557",
"0.6729355",
"0.67256343",
"0.6646808",
"0.661175",
"0.65956694",
"0.658358",
"0.6562555",
"0.652815",
"0.65018374",
"0.64979404",
"0.64838356"
] |
0.7698946
|
0
|
Download and untar a tar file.
|
def download_tar_file(tar_remote, save_dir, force_overwrite, cleanup=False):
tar_download_path = download_from_remote(tar_remote, save_dir, force_overwrite)
untar(tar_download_path, cleanup=cleanup)
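
# A minimal usage sketch, kept as a comment because the helpers
# `download_from_remote` and `untar` are defined elsewhere and the exact type of
# the remote descriptor is not shown here; `remote_descriptor` below is a
# hypothetical placeholder.
#     download_tar_file(remote_descriptor, 'downloads', force_overwrite=False, cleanup=True)
# The archive is fetched into 'downloads/', unpacked next to the downloaded
# tarball, and the tarball is deleted because cleanup=True.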
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)",
"def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")",
"def download_untar(url, download_path, extract_path=None):\n file_name = url.split('/')[-1]\n if extract_path is None:\n extract_path = download_path\n tar_file_path = os.path.join(download_path, file_name)\n download(tar_file_path, url)\n sys.stdout.flush()\n print('Extracting {} archive into {}'.format(tar_file_path, extract_path))\n untar(tar_file_path, extract_path)\n os.remove(tar_file_path)",
"def _download_untar(self, dep):\n tar = sh.Command(\"tar\")\n download_url = self.dependency_dict[dep][\"tarball\"]\n dlname = download_url.split(\"/\")[-1]\n download_path = Path(\".\") / dlname\n logger.debug(f\"downloading {dep} at {download_url} to {dlname}\")\n if self.quiet:\n trackers = ()\n else:\n trackers = (ProgressTracker(DataTransferBar()),)\n request_download(download_url, download_path, trackers=trackers)\n logger.debug(\n f\"downloaded file {download_path}, size\"\n f\" {download_path.stat().st_size}\"\n )\n try:\n tar(\"xvf\", download_path, **self.output_kwargs)\n except:\n logger.error(f\"untar of {download_path} failed\")\n sys.exit(1)",
"def download_and_extract(down_dir=download_dir, url=tuda_url):\n\n wget.download(url, down_dir) \n tar_filepath = os.path.join(down_dir, \"german-speechdata-package-v2.tar.gz\")\n #with tarfile.open(tar_filepath, \"r\") as tar:\n # tar.extractall(down_dir)",
"def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))",
"def download_and_unzip_data(\n url=\"https://storage.googleapis.com/simpeg/em_examples/tdem_groundedsource/tdem_groundedsource.tar\",\n):\n # download the data\n downloads = utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory",
"def untar(tar_path, cleanup=False):\n tfile = tarfile.open(tar_path, 'r')\n tfile.extractall(os.path.dirname(tar_path))\n tfile.close()\n if cleanup:\n os.remove(tar_path)",
"def download_uncompress(url, path=\".\", compression=None, context=None):\n\n # infer compression from url\n if compression is None:\n compression = os.path.splitext(url)[1][1:]\n\n # check compression format and set mode\n if compression in [\"gz\", \"bz2\"]:\n mode = \"r|\" + compression\n elif compression == \"tar\":\n mode = \"r:\"\n else:\n raise ValueError(\"The file must be of type tar/gz/bz2.\")\n\n # download and untar/uncompress at the same time\n if context is not None:\n stream = urlopen(url, context=context)\n else:\n stream = urlopen(url)\n tf = tarfile.open(fileobj=stream, mode=mode)\n tf.extractall(path)",
"def download_cookbook(fileurl, download_dir):\n\n # download and save file to download_dir\n logger.info('Downloading cookbook: %s' % fileurl)\n \n # get filename\n tarname = fileurl.split('/')[-1].split('?')[0]\n tarfilepath = download_dir + tarname\n\n logger.info('Writing cookbook file to %s' % tarfilepath)\n \n with open(tarfilepath, 'w') as tmpfile:\n res = requests.get(fileurl) \n for chunk in res.iter_content(256):\n tmpfile.write(chunk)\n\n logger.info('Extracting contents of %s to %s' % (tarfilepath, download_dir))\n # extract file to download_dir\n with tarfile.open(tarfilepath) as tar:\n try:\n tar.extractall(download_dir)\n except Exception as e:\n logger.error('Error extracting tarfile %s. Ex: %s' % (tarfilepath, e))\n\n # delete the downloaded archive\n logger.info('Deleting %s' % tarfilepath)\n os.remove(tarfilepath)",
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def download_and_decompress(url, download_path):\n\n # Extract the filename from the URL\n parsed = urlparse(url)\n filename = basename(parsed.path)\n\n # Ensure the output directory exists\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n # Get a temporary file path for the compressed file download\n downloaded_file = os.path.join(tempfile.gettempdir(), filename)\n\n # Download the file\n urlretrieve(url, downloaded_file)\n\n # Decompress and extract all files to the specified local path\n tar = tarfile.open(downloaded_file, \"r\")\n tar.extractall(download_path)\n tar.close()\n\n # Remove the downloaded file\n os.remove(downloaded_file)",
"def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)",
"def maybe_download_and_extract():\n dest_directory = MODEL_DIR\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def untar(file_path, target_dir=None, gzipped=True, verbose=False):\n return posix.untar(file_path, target_dir, gzipped, verbose)",
"def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def curl_remote_name(cls, file_url):\n tar_gz_file_name = file_url.split('/')[-1]\n\n if sys.version_info >= (3, 2):\n from urllib.error import HTTPError\n from urllib.request import urlopen\n else:\n from urllib2 import HTTPError, urlopen\n\n response = None\n try:\n response = urlopen(file_url, timeout=10)\n except HTTPError as exc:\n message = 'Download failed: URL: {0}, reason: {1}'.format(\n file_url, exc)\n if 'HTTP Error 404' in str(exc):\n raise RemoteFileNotFoundError(message)\n else:\n raise InstallError(message)\n\n tar_gz_file_obj = io.BytesIO(response.read())\n with open(tar_gz_file_name, 'wb') as f_out:\n f_out.write(tar_gz_file_obj.read())\n return tar_gz_file_name",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def maybe_download_and_extract():\n dest_directory = FLAGS['model_dir']\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def fetch_and_extract(self, filename):\n # type: (Text) -> None\n\n with io.open(filename, 'wb') as f:\n self.bucket.download_fileobj(filename, f)\n with tarfile.open(filename, \"r:gz\") as tar:\n tar.extractall(self.data_dir)",
"def torrent_download(download_url, torrent):\n webFile = urllib.urlopen(download_url)\n localFile = open(torrent, 'wb')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()",
"def extract_tar(tar_path, target_folder):\n with tarfile.open(tar_path, 'r') as archive:\n archive.extractall(target_folder)",
"def rupture(url, outpath=None, branch='master', dirname=None, release=None):\n try:\n file, filename = _download(\n url, outpath=outpath, \n dirname=dirname, branch=branch, \n release=release\n )\n base, cs = _unzip(filename)\n _delete(filename)\n if release or branch != 'master':\n return\n to_find = \"{}/{}-{}\".format(base, file, branch)\n _newname = dirname or file\n shutil.move(to_find, base+\"/\"+_newname)\n except Exception as e:\n six.print_(traceback.format_exc())\n six.print_(\"Cannot download the repo. Could you check the repo url ?\")",
"def _extract_tar_file(tar_path, buildspace_tree, unpack_dir, ignore_files, relative_to):\n\n class NoAppendList(list):\n \"\"\"Hack to workaround memory issues with large tar files\"\"\"\n def append(self, obj):\n pass\n\n # Simple hack to check if symlinks are supported\n try:\n os.symlink('', '')\n except FileNotFoundError:\n # Symlinks probably supported\n symlink_supported = True\n except OSError:\n # Symlinks probably not supported\n get_logger().info('System does not support symlinks. Ignoring them.')\n symlink_supported = False\n except BaseException:\n # Unexpected exception\n get_logger().exception('Unexpected exception during symlink support check.')\n raise BuildkitAbort()\n\n resolved_tree = buildspace_tree.resolve()\n\n with tarfile.open(str(tar_path)) as tar_file_obj:\n tar_file_obj.members = NoAppendList()\n for tarinfo in tar_file_obj:\n try:\n if relative_to is None:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name)\n else:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name).relative_to(\n relative_to) # pylint: disable=redefined-variable-type\n try:\n ignore_files.remove(tree_relative_path.as_posix())\n except KeyError:\n destination = resolved_tree / tree_relative_path\n if tarinfo.issym() and not symlink_supported:\n # In this situation, TarFile.makelink() will try to create a copy of the\n # target. But this fails because TarFile.members is empty\n # But if symlinks are not supported, it's safe to assume that symlinks\n # aren't needed. The only situation where this happens is on Windows.\n continue\n if tarinfo.islnk():\n # Derived from TarFile.extract()\n new_target = resolved_tree / unpack_dir / PurePosixPath(\n tarinfo.linkname).relative_to(relative_to)\n tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access\n if destination.is_symlink():\n destination.unlink()\n tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access\n except BaseException:\n get_logger().exception('Exception thrown for tar member: %s', tarinfo.name)\n raise BuildkitAbort()",
"def _download(\n hostname,\n auth,\n infile,\n output_dir=\".\",\n show_progress=False,\n unpack_packages=True,\n delete_unpacked_packages=False,\n) -> Optional[Dict[str, Any]]:\n object_list = Manifest.load(Path(infile))\n if object_list is None:\n logger.critical(f\"Error loading {infile}\")\n return None\n try:\n auth.get_access_token()\n except Gen3AuthError:\n logger.critical(f\"Unable to authenticate your credentials with {hostname}\")\n return\n\n downloader = DownloadManager(\n hostname=hostname,\n auth=auth,\n download_list=object_list,\n show_progress=show_progress,\n )\n\n out_dir_path = ensure_dirpath_exists(Path(output_dir))\n return downloader.download(\n object_list,\n str(out_dir_path),\n show_progress=show_progress,\n unpack_packages=unpack_packages,\n delete_unpacked_packages=delete_unpacked_packages,\n )"
] |
[
"0.7930564",
"0.76136655",
"0.76053816",
"0.73980355",
"0.710569",
"0.70067215",
"0.695739",
"0.68496716",
"0.67946625",
"0.6776796",
"0.6614692",
"0.6568361",
"0.65660137",
"0.65625364",
"0.6544811",
"0.6538459",
"0.65218437",
"0.6500966",
"0.6488277",
"0.6408326",
"0.64048576",
"0.63679",
"0.6361239",
"0.6347153",
"0.62906164",
"0.6263181",
"0.62246704",
"0.62056476",
"0.61993605",
"0.61834174"
] |
0.77506953
|
1
|
Untar a tar file inside its current directory.
|
def untar(tar_path, cleanup=False):
tfile = tarfile.open(tar_path, 'r')
tfile.extractall(os.path.dirname(tar_path))
tfile.close()
if cleanup:
os.remove(tar_path)
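
# A minimal usage sketch, assuming a hypothetical archive at 'data/archive.tar';
# its members are extracted into 'data/' and the tarball is then removed
# because cleanup=True.
import os
import tarfile

if __name__ == '__main__':
    untar('data/archive.tar', cleanup=True)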
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def untar(file_path, target_dir=None, gzipped=True, verbose=False):\n return posix.untar(file_path, target_dir, gzipped, verbose)",
"def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")",
"def untar(tarfile, outdir):\n tmpdir = tempfile.mkdtemp()\n try:\n untared = _open_archive(tarfile, tmpdir)\n files = [f for f in untared if os.path.isfile(os.path.join(tmpdir, f))]\n dirs = [d for d in untared if os.path.isdir(os.path.join(tmpdir, d))]\n assert len(files) + len(dirs) == len(untared), 'Only files and directories'\n if _files_same(tmpdir, outdir, files) and _dirs_same(tmpdir, outdir, dirs):\n # Nothing new or different in the tarfile.\n return False\n # Some or all of the files / directories are new.\n _move_files(tmpdir, outdir, files)\n _move_dirs(tmpdir, outdir, dirs)\n return True\n finally:\n if os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir)",
"def untar(file_path, extract_folder=None):\n if extract_folder is None:\n extract_folder = os.path.dirname(file_path)\n tar = tarfile.open(file_path)\n tar.extractall(extract_folder)\n tar.close()",
"def unpack(filepath, target_dir, rm_tar=False):\n print(\"Unpacking %s ...\" % filepath)\n tar = tarfile.open(filepath)\n tar.extractall(target_dir)\n tar.close()\n if rm_tar == True:\n os.remove(filepath)",
"def untar(input_filename, extract_dir):\n try:\n tar_ds = tarfile.open(input_filename)\n except tarfile.TarError:\n raise ValueError(\"%s is not a tar file\" % (input_filename))\n tar_ds.extractall(path=extract_dir)\n tar_ds.close()",
"def untar_file(filename, location):\n if not os.path.exists(location):\n os.makedirs(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif (filename.lower().endswith('.bz2')\n or filename.lower().endswith('.tbz')):\n mode = 'r:bz2'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([member.name for member in tar.getmembers()])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n fn = split_leading_dir(fn)[1]\n path = os.path.join(location, fn)\n if member.isdir():\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError), e:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n continue\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n destfp = open(path, 'wb')\n try:\n shutil.copyfileobj(fp, destfp)\n finally:\n destfp.close()\n fp.close()\n finally:\n tar.close()",
"def extractTar(tar_file):\r\n\r\n tfile = tarfile.open(tar_file, 'r')\r\n tar_members = tfile.getmembers()\r\n for tar_member in tar_members:\r\n tar_member.name = os.path.basename(tar_member.name) # Strip out the path and keep just the file name\r\n tfile.extract(tar_member, path=\"tempdata/\")\r\n tfile.close()\r\n print \"Finished extracting tar file\"\r\n return",
"def download_tar_file(tar_remote, save_dir, force_overwrite, cleanup=False):\n tar_download_path = download_from_remote(tar_remote, save_dir, force_overwrite)\n untar(tar_download_path, cleanup=cleanup)",
"def handle_tar(file_path, extension, extracted_path, destination_directory):\n tar = tarfile.open(file_path, extension)\n # remove files if they already exist\n if os.path.exists(extracted_path):\n shutil.rmtree(extracted_path)\n tar.extractall(path=destination_directory)\n tar.close()",
"def save_tar(self, target_dir):\n # type: (Text) -> None\n\n if not os.path.isdir(target_dir):\n raise ValueError(\"Target directory '{}' not found.\".format(target_dir))\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n self.s3.Object(self.bucket_name, filekey).put(Body=open(tarname, 'rb'))",
"def save_tar(self, target_dir):\n # type: (Text) -> None\n if not os.path.isdir(target_dir):\n raise ValueError('target_dir %r not found.' % target_dir)\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n blob = self.bucket.blob(filekey)\n blob.upload_from_filename(tarname)",
"def untar(filepath, outfoldername='.', compression='r', deletesource=False):\n import tarfile\n\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete tar archive {0}.\".format(filepath))\n\n return filelist",
"def untar(\n archive_fpath=EXTERNAL_DIR / METADATA_TAR_FNAME,\n dst_dirpath=RAW_DIR,\n force_overwrite=False\n):\n\n try:\n if not isinstance(archive_fpath, Path):\n archive_fpath = Path(archive_fpath)\n if not archive_fpath.exists():\n raise FileNotFoundError()\n except (TypeError, FileNotFoundError):\n raise FileNotFoundError(f'File {archive_fpath} not found.')\n\n try:\n if not isinstance(dst_dirpath, Path):\n dst_dirpath = Path(dst_dirpath)\n except TypeError:\n # not a filetype\n dst_dirpath = archive_fpath.parents[0]\n\n # Throw error if file already exists and force overwrite is not specified\n if not force_overwrite and (dst_dirpath / METADATA_FNAME).exists():\n raise FileExistsError(\n f'File `{METADATA_FNAME}` already exists in {dst_dirpath}.'\n )\n\n # Throw error if destination is not a known directory\n if not (dst_dirpath.exists() and dst_dirpath.is_dir()):\n raise(NotADirectoryError(\n f'Destination `{dst_dirpath}` is not a directory.'\n ))\n\n # Attempt to extract metadata file.\n with tarfile.open(archive_fpath, mode='r:xz') as tar_fp:\n try:\n info = tar_fp.getmember('metadata.tsv')\n except KeyError:\n raise KeyError(\n f'File `metadata.tsv` does not exist in tar archive `{archive_fpath}`.'\n )\n\n tar_fp.extractall(dst_dirpath, members=[info])\n\n Path(dst_dirpath / 'metadata.tsv').rename(dst_dirpath / METADATA_FNAME)",
"def untgz(tgz_filename, out_dir):\r\n logging.info(\"Source: %s\" % tgz_filename)\r\n tgz = TgzHelper(tgz_filename, out_dir)\r\n tgz.extract()",
"def create_tarfile(source_dir, filename=\"/tmp/contents.tar.gz\"):\n try:\n # Define the default signal handler for catching: Ctrl-C\n signal.signal(signal.SIGINT, signal.default_int_handler)\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))\n\n except (OSError, IOError) as e:\n # OSError: [Errno 13] Permission denied\n if e.errno == errno.EACCES:\n source_dir = os.getcwd() if source_dir == '.' else source_dir # Expand cwd\n warn_purge_exit(info_msg=\"Permission denied. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"Permission denied. Make sure to have read permission \"\n \"for all the files and directories in the path: %s\")\n % (source_dir))\n # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)\n elif e.errno == errno.ENOSPC:\n dir_path = os.path.dirname(filename)\n warn_purge_exit(info_msg=\"No space left. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"No space left when compressing your data in: %s.\\n\"\n \"Make sure to have enough space before uploading your data.\")\n % (os.path.abspath(dir_path)))\n\n except KeyboardInterrupt: # Purge tarball on Ctrl-C\n warn_purge_exit(info_msg=\"Ctrl-C signal detected: Removing compressed data...\",\n filename=filename,\n exit_msg=\"Stopped the data upload gracefully.\")",
"def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))",
"def extract_tar(tar_path, target_folder):\n with tarfile.open(tar_path, 'r') as archive:\n archive.extractall(target_folder)",
"def _download_untar(self, dep):\n tar = sh.Command(\"tar\")\n download_url = self.dependency_dict[dep][\"tarball\"]\n dlname = download_url.split(\"/\")[-1]\n download_path = Path(\".\") / dlname\n logger.debug(f\"downloading {dep} at {download_url} to {dlname}\")\n if self.quiet:\n trackers = ()\n else:\n trackers = (ProgressTracker(DataTransferBar()),)\n request_download(download_url, download_path, trackers=trackers)\n logger.debug(\n f\"downloaded file {download_path}, size\"\n f\" {download_path.stat().st_size}\"\n )\n try:\n tar(\"xvf\", download_path, **self.output_kwargs)\n except:\n logger.error(f\"untar of {download_path} failed\")\n sys.exit(1)",
"def _extract_tar_file(tar_path, buildspace_tree, unpack_dir, ignore_files, relative_to):\n\n class NoAppendList(list):\n \"\"\"Hack to workaround memory issues with large tar files\"\"\"\n def append(self, obj):\n pass\n\n # Simple hack to check if symlinks are supported\n try:\n os.symlink('', '')\n except FileNotFoundError:\n # Symlinks probably supported\n symlink_supported = True\n except OSError:\n # Symlinks probably not supported\n get_logger().info('System does not support symlinks. Ignoring them.')\n symlink_supported = False\n except BaseException:\n # Unexpected exception\n get_logger().exception('Unexpected exception during symlink support check.')\n raise BuildkitAbort()\n\n resolved_tree = buildspace_tree.resolve()\n\n with tarfile.open(str(tar_path)) as tar_file_obj:\n tar_file_obj.members = NoAppendList()\n for tarinfo in tar_file_obj:\n try:\n if relative_to is None:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name)\n else:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name).relative_to(\n relative_to) # pylint: disable=redefined-variable-type\n try:\n ignore_files.remove(tree_relative_path.as_posix())\n except KeyError:\n destination = resolved_tree / tree_relative_path\n if tarinfo.issym() and not symlink_supported:\n # In this situation, TarFile.makelink() will try to create a copy of the\n # target. But this fails because TarFile.members is empty\n # But if symlinks are not supported, it's safe to assume that symlinks\n # aren't needed. The only situation where this happens is on Windows.\n continue\n if tarinfo.islnk():\n # Derived from TarFile.extract()\n new_target = resolved_tree / unpack_dir / PurePosixPath(\n tarinfo.linkname).relative_to(relative_to)\n tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access\n if destination.is_symlink():\n destination.unlink()\n tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access\n except BaseException:\n get_logger().exception('Exception thrown for tar member: %s', tarinfo.name)\n raise BuildkitAbort()",
"def extract_tar(\r\n file_path: str, unpack_path: str = None, remove_if_exists: bool = False\r\n) -> Optional[str]:\r\n\r\n if not os.path.exists(file_path):\r\n log.warning(file_path + \" does not exist.\")\r\n return None\r\n\r\n if not tarfile.is_tarfile(file_path):\r\n log.warning(file_path + \" is not a tar file.\")\r\n return None\r\n\r\n if not unpack_path:\r\n unpack_path = os.path.join(\r\n os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0]\r\n )\r\n\r\n if os.path.isdir(unpack_path):\r\n log.info(\"Unpack directory already exists \" + unpack_path)\r\n if not os.listdir(unpack_path):\r\n log.info(\"Directory is empty. Unpacking...\")\r\n elif remove_if_exists:\r\n log.info(\"Removing existing unpacked dir: \" + unpack_path)\r\n shutil.rmtree(unpack_path)\r\n else:\r\n return unpack_path\r\n\r\n log.info(\"Unpacking file \" + os.path.basename(file_path) + \" to: \" + unpack_path)\r\n compression = identify_compression(file_path)\r\n if not compression:\r\n mode = \"r\"\r\n elif compression == \"gz\":\r\n mode = \"r:gz\"\r\n elif compression == \"bz2\":\r\n mode = \"r:bz2\"\r\n else:\r\n mode = \"r\"\r\n\r\n tar = tarfile.open(file_path, mode)\r\n tar.extractall(unpack_path)\r\n tar.close()\r\n\r\n # Tar unpacking via tar command\r\n # tar needs empty directory\r\n # if not os.path.exists(unpack_path):\r\n # os.makedirs(unpack_path)\r\n # log.info(\"Unpacking (via tar command) file \" + os.path.basename(file_path) + \" to: \" + unpack_path)\r\n # handle compression with -zvxf\r\n # cmd = \"tar -xf \" + file_path + \" -C \" + unpack_path\r\n # log.debug(\"Executing: \" + cmd)\r\n # exit_code = system_utils.bash_command(cmd)\r\n # log.info(\"Finished with exit code: \" + str(exit_code))\r\n\r\n if not os.path.exists(unpack_path):\r\n log.warning(\"Failed to extract tar file: \" + file_path)\r\n\r\n return unpack_path",
"def untar(archive):\n log.info('Unpacking archive \"%s\".' % archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)",
"def unzip_and_untar(item):\n print(\"Unpacking %s\" % item)\n\n f = tarfile.open(item, mode=\"r\")\n f.extractall(path=\"working\")\n f.close()",
"def save_tar(self, target_dir):\n # type: (Text) -> None\n raise NotImplementedError(\"\")",
"def unpack_tarfile(self, tarfile_path: str, target_directory: str) -> None:\n with tarfile.open(tarfile_path) as tar:\n tar.extractall(path=target_directory)\n\n found = True\n\n while found:\n\n found = False\n\n for root_directory, _, files in os.walk(target_directory):\n for file in files:\n if re.search('.*.tar.gz', file):\n found = True\n sub_filepath_ref = os.path.join(root_directory, file)\n with tarfile.open(sub_filepath_ref) as tar:\n tar.extractall(path=root_directory)\n os.remove(sub_filepath_ref)",
"def download_untar(url, download_path, extract_path=None):\n file_name = url.split('/')[-1]\n if extract_path is None:\n extract_path = download_path\n tar_file_path = os.path.join(download_path, file_name)\n download(tar_file_path, url)\n sys.stdout.flush()\n print('Extracting {} archive into {}'.format(tar_file_path, extract_path))\n untar(tar_file_path, extract_path)\n os.remove(tar_file_path)",
"def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)",
"def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)",
"def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))",
"def tar_dir(output_path, source_dir):\n with tarfile.open(output_path, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))"
] |
[
"0.8113908",
"0.75153756",
"0.73078",
"0.6981398",
"0.69512826",
"0.6876098",
"0.68632334",
"0.6681066",
"0.6653954",
"0.66418374",
"0.6571097",
"0.6553955",
"0.6527455",
"0.65201277",
"0.6481412",
"0.64048636",
"0.63865715",
"0.6363689",
"0.6341883",
"0.6340731",
"0.6338941",
"0.6333721",
"0.6330816",
"0.62171626",
"0.6213701",
"0.609461",
"0.60514474",
"0.60198057",
"0.5973107",
"0.5933967"
] |
0.7840172
|
1
|
Are a[0] and a[1] the same length?
|
def same_len(count, a:tuple):
if len(a[0]) == len(a[1]):
return count + 1
return count
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True",
"def __len__(self):\n return len(self[0]) + len(self[1])",
"def size(A):\n\treturn (len(A[0]),len(A))",
"def one_dim(a: cython.double[:]):\n a[0] *= 2\n return a[0], a.ndim",
"def test_empty(self):\n a = np.ones((3, 4, 5))\n ai = np.ones((3, 0, 5), dtype=np.intp)\n\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, ai.shape)",
"def arrays_are_same(a1, a2):\n try:\n return a1.__array_interface__['data'] == a2.__array_interface__['data'] \\\n and a1.strides == a2.strides\n except AttributeError:\n return False",
"def to_same_dim(a, b):\n\n if len(a) > len(b):\n return a, padding(b, len(a))\n\n elif len(a) < len(b):\n return padding(a, len(b)), b\n\n else:\n return a, b",
"def _assert_same_length(\n list_series_1: Sequence[TimeSeries],\n list_series_2: Sequence[TimeSeries],\n):\n\n raise_if_not(\n len(list_series_1) == len(list_series_2),\n \"Sequences of series must be of the same length, found length:\"\n + f\" {len(list_series_1)} and {len(list_series_2)}.\",\n )",
"def __len__(self):\n return len(self.__a)",
"def test01a(self):\n b = bcolz.arange(self.N, rootdir=self.rootdir)\n b.resize(3)\n a = np.arange(3)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")",
"def test01a(self):\n a = np.arange(1e2)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")",
"def is_equal(a: list[int], b: list[int]) -> bool:\n a_length: int = len(a)\n b_length: int = len(b)\n if a_length == 0 and b_length == 0:\n return True\n else:\n i = 0\n if a_length == b_length:\n if a_length <= len(b):\n while i < a_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n while i < b_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n return False",
"def test04a(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=16, rootdir=self.rootdir)\n sl = slice(1, 2)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")",
"def test00a(self):\n b = bcolz.arange(self.N, rootdir=self.rootdir)\n b.resize(self.N-3)\n a = np.arange(self.N-3)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")",
"def same_length(*args):\n if len({len(word) for word in args}) <= 1:\n return True\n return False",
"def assert_same_size(sequences):\n seq_size = len(sequences[0])\n for seq in sequences:\n if len(seq) != seq_size:\n raise SizeError",
"def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim",
"def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False",
"def test02a(self):\n a = np.arange(1e2)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1, 3)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")",
"def really1d(arr):\n if np.ndim(arr) != 1:\n return False\n # Empty list or array\n if len(arr) == 0:\n return True\n if np.any(np.vectorize(np.ndim)(arr)):\n return False\n return True",
"def check_subarray(array1, array2):\r\n \r\n # check assumption\r\n if (len(array2.shape) != 1) or (array2.shape[0] != array1.shape[-1]):\r\n raise ValueError('Attempting to check for subarray equality when shape assumption does not hold.')\r\n \r\n return np.all(array1==array2, axis=-1)",
"def testArrayOfOne():\n arr = [1]\n sort(arr)\n expectedArr = [1]\n assert isEqual(arr, expectedArr)",
"def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'",
"def numext(self):\n numext1, numext2 = len(self.ad1), len(self.ad2)\n if numext1 != numext2:\n return [f'{numext1} v {numext2}']",
"def match_size(*arrays):\n target = arrays[0].datashape\n result = []\n\n # check for bad inputs\n for a in arrays:\n ds = a.datashape.copy()\n for i in range(min(a.ndim, target.ndim)):\n if ds.dim_low[i] < target.dim_low[i] or \\\n ds.dim_high[i] > target.dim_high[i]:\n raise ValueError(\"All array domains must be a subset \"\n \"of the first array's domain\")\n\n for a in arrays:\n ds = a.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n\n for i in range(min(a.ndim, target.ndim)):\n ds.dim_low[i] = target.dim_low[i]\n ds.dim_high[i] = target.dim_high[i]\n if ds != a.datashape:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)",
"def is1d(a):\n return np.sum(asarray(asarray(a).shape) > 1) <= 1",
"def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])",
"def have_same_shapes(array1, array2):\n return array1.shape == array2.shape",
"def __len__(self):\n return 2",
"def __len__(self):\n return 2"
] |
[
"0.65674996",
"0.6370597",
"0.63529515",
"0.62148297",
"0.610623",
"0.59933484",
"0.5922136",
"0.58705866",
"0.58634",
"0.58187836",
"0.5810064",
"0.57752323",
"0.5743565",
"0.5728272",
"0.56998324",
"0.5662846",
"0.5635293",
"0.56337184",
"0.56009173",
"0.55818456",
"0.5566494",
"0.5563549",
"0.5549002",
"0.5538872",
"0.55319077",
"0.5528901",
"0.5524378",
"0.55240285",
"0.55173075",
"0.55173075"
] |
0.6591336
|
0
|
Return list of tuples (spacy tokens, bpe_tokens) to compare tokens
|
def compare_joined_tokens(sp_tokens, bpe_tokens):
out = []
for s, b in zip(sp_tokens, bpe_tokens):
joined_bpe = functools.reduce(join_bpe, b, [])
if s != joined_bpe:
out.append((s, joined_bpe))
return out
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compare_tokens(options, db):\n t1, t2 = options.cmp\n d1 = db.GetNet(t1)\n d2 = db.GetNet(t2)\n union = list(set(d1 + d2))\n meta = (t1, t2, union)\n results = []\n for el in set(d1 + d2):\n el = nacaddr.IP(el)\n if el in d1 and el in d2:\n results.append(str(el))\n elif el in d1:\n results.append(str(el))\n elif el in d2:\n results.append(str(el))\n return meta, results",
"def get_bigrams(self, tokens):\n return [a + \" \" + b for a, b in zip(tokens, tokens[1:])]",
"def get_context_pairs(tokens):\n data = set()\n ngrams = get_ngrams(tokens, 4)\n if not ngrams:\n ngrams = [tokens]\n for ngrams_batch in ngrams:\n for pair in combinations(ngrams_batch, 2):\n diff_index = abs(tokens.index(pair[0]) - abs(tokens.index(pair[1])))\n if len(pair[0]) < 2 or len(pair[1]) < 2:\n continue\n data.add((pair, diff_index))\n return data",
"def diff(gold_tokens, pred_tokens):\n return list(_diff(gold_tokens, pred_tokens))",
"def tokens_and_tags(self):\n return list(zip(self.tokens(), self.tags()))",
"def _tokenize(self, text):\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n bpe_tokens.extend([t for t in self.bpe(token).split(\" \")])\n return bpe_tokens",
"def sentences(a, b):\n c = [] #Declare list\n\n a = sent_tokenize(a)\n b = sent_tokenize(b)\n\n for x in a:\n for y in b:\n if x == y:\n if not x in c:\n c.append(x)\n return (c)",
"def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))",
"def sentences(a, b):\n setA = set(sent_tokenize(a))\n setB = set(sent_tokenize(b))\n return list(setA & setB)",
"def Compare_Tokenization(ref_sentences, test_sentences, **kwargs):\n output_path = kwargs.get(\"output_path\", os.environ[\"PWD\"])\n\n with open(f\"{output_path}/tok_diff.txt\", \"w\") as ft:\n for ref_sent, test_sent in zip(ref_sentences, test_sentences):\n new_ref = []\n new_test = []\n for token_ref in ref_sent:\n token_ref = token_ref.lower()\n if token_ref[0] == \"[\" and token_ref[-1] == \"]\":\n token_ref = token_ref[1:-1]\n new_ref.append(token_ref)\n for token_test in test_sent:\n token_test = token_test.lower()\n if token_test[0] == \"[\" and token_test[-1] == \"]\":\n token_test = token_test[1:-1]\n new_test.append(token_test)\n if new_ref != new_test:\n set_ref = set(new_ref)\n set_test = set(new_test)\n ft.write(\"Sentence Differs:\\n{}\\nin tokens:{}<--->{}\\n\".format(\" \".join(ref_sent), sorted(list(set_ref - set_test)), sorted(list(set_test - set_ref))))",
"def tokens(self):\n return tuple(self._tree.getWords())",
"def get_part_of_speech(tokens):\n\n return [e for e in nltk.chunk.ne_chunk(nltk.pos_tag(tokens)) if type(e) is tuple]",
"def sentences(a, b):\n a = set(sent_tokenize(a))\n b = set(sent_tokenize(b))\n return a & b",
"def tokens(text) -> Tuple[Word]:\n return tuple(re.findall('[a-z]+', text.lower()))",
"def bpe_tokenize(self, text: str) -> List[str]:\n return self.bpe.encode(text)",
"def get_tokens(self, text):\n\t\treturn tuple(self._compiled_pattern.findall(text))",
"def sentences(a, b):\n\n # TODO\n a, b = sent_tokenize(a, language='english'), sent_tokenize(b, language='english')\n return list(set([sentence for sentence in a if sentence in b]))",
"def _diff(gold_tokens, pred_tokens):\n matcher = difflib.SequenceMatcher(None, gold_tokens, pred_tokens)\n a_lo = b_lo = 0\n for a_hi, b_hi, n in matcher.get_matching_blocks():\n if a_lo < a_hi or b_lo < b_hi:\n yield gold_tokens[a_lo:a_hi], pred_tokens[b_lo:b_hi]\n a_lo = a_hi + n\n b_lo = b_hi + n",
"def _match_enums(enum_hypothesis_list, enum_reference_list):\n word_match = []\n for i in range(len(enum_hypothesis_list))[::-1]:\n for j in range(len(enum_reference_list))[::-1]:\n if enum_hypothesis_list[i][1] == enum_reference_list[j][1]:\n word_match.append((enum_hypothesis_list[i][0], enum_reference_list[j][0]))\n (enum_hypothesis_list.pop(i)[1], enum_reference_list.pop(j)[1])\n break\n return word_match, enum_hypothesis_list, enum_reference_list",
"def _tokenize(self, text: str) -> List[str]:\n tokens = []\n for token in re.findall(self._pat, text):\n tokens.extend(bpe_token for bpe_token in self._bpe(token).split(\" \"))\n return tokens",
"def sentences(a, b):\n # Tokenize has the ability to separate, in this case, sentences by finding relevant symbols (.,!,?,etc) in text\n aS1 = set(sent_tokenize(a))\n bS1 = set(sent_tokenize(b))\n\n return aS1 & bS1",
"def _split_to_wordpieces(self, tokens: List[str]) -> Tuple[List[str], List[int]]:\n bert_tokens = [] # Original tokens split into wordpieces.\n # Index of each wordpiece that starts a new token.\n token_start_indices = []\n for i, token in enumerate(tokens):\n # '+ 1' is because bert_tokens will be prepended by [CLS] token later.\n token_start_indices.append(len(bert_tokens) + 1)\n pieces = self._tokenizer.tokenize(token)\n bert_tokens.extend(pieces)\n return bert_tokens, token_start_indices",
"def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:\n return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset())))",
"def sentences(a, b):\n\n # creating a list with all the sentences using nltk\n sentencesA = sent_tokenize(a)\n sentencesB = sent_tokenize(b)\n\n same_sentences = []\n\n for sent in sentencesA:\n if sent in sentencesB and sent not in same_sentences:\n same_sentences.append(sent)\n\n return same_sentences",
"def replace_tokens(self, tokens, replace_prob):\n details = []\n idx = 0\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.idf and self.get_random_prob() < replace_prob[i]:\n # Use Tfidf find similar token\n tokens[i] = self.get_similar_token(tokens[i])\n if tokens[i] != old_token:\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details",
"def get_pairs_to_compare(id_token_pairs):\n token_sku_id_pairs = dict()\n for sku_id, tokens in id_token_pairs.items():\n for token in tokens:\n token_sku_id_pairs[token] = token_sku_id_pairs.get(token, []) + [sku_id]\n\n pairs_to_compare = set()\n for token, sku_ids in token_sku_id_pairs.items():\n id_combinations = combinations(sku_ids, 2)\n for (id1, id2) in id_combinations:\n if (id2, id1) not in pairs_to_compare:\n pairs_to_compare.add((id1, id2))\n\n return pairs_to_compare",
"def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads",
"def compareLemma(l1, l2, cwn):\n l1 = f\"^{l1}$\"\n l2 = f\"^{l2}$\"\n \n l1_lst = [str(li).replace('<CwnSense', '').replace('>', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l1)]\n l2_lst = [str(li).replace('<CwnSense', '').replace('>', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l2)]\n \n if len(l1_lst) < len(l2_lst):\n zip_lst = zip(l2_lst, l1_lst)\n long = l2_lst\n short = l1_lst\n else:\n zip_lst = zip(l1_lst, l2_lst)\n long = l1_lst\n short = l2_lst\n \n # Print paired list\n for i, (a, b) in enumerate(zip_lst):\n #print(f\"{a:50}{b:>50}\")\n print(f\"{i+1:>3} {a.ljust(45, ' ')}{b}\") # 使用全形空白\n # Print remaining list\n if len(long) > len(short):\n for item in long[len(list(zip_lst)):]:\n i += 1\n print(f\"{i+1:>3} {item}\")",
"def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens",
"def get_token_list(text):\n return text.split()"
] |
[
"0.6344726",
"0.61597407",
"0.60535365",
"0.60231274",
"0.5980359",
"0.5960785",
"0.5946002",
"0.5909258",
"0.58390117",
"0.5822121",
"0.5768986",
"0.57118964",
"0.57054347",
"0.56814677",
"0.56762326",
"0.5667053",
"0.5663035",
"0.56146777",
"0.56124616",
"0.55879784",
"0.5580117",
"0.5563318",
"0.55434793",
"0.5540887",
"0.5537785",
"0.5520018",
"0.5519531",
"0.5513233",
"0.5480968",
"0.5476828"
] |
0.764389
|
0
|
Is a[0] shorter than a[1]?
|
def count_less(count, a):
if len(a[0]) < len(a[1]):
return count + 1
return count
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __lt__(self, seq):\n if any(self._arr[i] < seq[i] for i in range(min(self._length, len(seq)))):\n return True\n return self._length < len(seq)",
"def minimum_inplace(a, b):",
"def lowest(t1,t2):\n compare_len = min(len(t1), len(t2))\n for i in range(0,compare_len):\n if t1[i] < t2[i]:\n return t1\n elif t1[i] > t2[i]:\n return t2\n\n # if here, identical to compare_len; just pick one\n return t1",
"def is_longer(dna1, dna2):\n return len(dna1)> len(dna2)",
"def same_len(count, a:tuple):\n if len(a[0]) == len(a[1]):\n return count + 1\n return count",
"def __le__(self, seq):\n if any(self._arr[i] > seq[i] for i in range(min(self._length, len(seq)))):\n return False\n return self._length <= len(seq)",
"def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True",
"def oneaway(self, a, b):\n alen, blen = len(a), len(b)\n if alen == blen:\n return self.handle_same_length(a,b)\n # assumption: deletion of a longer char from a longer string is the opposite operation to an insert .\n # Therefore we categorize strings as longer and shorter we can reduce the number of operations to check down to 2.\n if alen == blen + 1:\n return self.handle_one_off(b, a)\n elif blen == alen + 1:\n return self.handle_one_off(a, b)\n return False",
"def mini(a,b):\n\tif a < b: \n\t\treturn a\n\treturn b",
"def min(self):\n a = self.array_form\n min = len(a)\n for i in xrange(len(a)):\n if a[i] != i and a[i] < min:\n min = a[i]\n return min",
"def solution(a, b):\n if len(a) < len(b):\n return a + b + a\n return b + a + b",
"def verify(array):\r\n for i in range(1, len(array)):\r\n if array[i - 1] > array[i]:\r\n raise ValueError('Unsorted list detected. (' + str(array[i - 1]) + ' > ' + str(array[i]) + ')')",
"def compare_eq_len(a, b):\n n = len(a)\n m = len(b)\n\n # slide b across a from left to right till from just overlapping till full overlap\n overlap = 0 # stores length of the overlap\n lconcat = \"\" # this stores the shortest common superstring\n for j in range(m):\n starta = 0\n enda = j+1\n startb = m - (j+1)\n endb = m\n if a[starta:enda] == b[startb:endb]:\n # if an overlap is found, check if it is larger than the previously detected one\n # print(\"overlap found\")\n if len(a[starta:enda]) > overlap: \n overlap = len(a[starta:enda]) \n lconcat = b + a[enda:] # this is the current shortest common superstring\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # print(\"-\")\n\n # slide b across a so that b starts from one element past a after full overlap\n rconcat = \"\"\n for j in range(m-1):\n starta = j+1\n enda = m\n startb = 0\n endb = m - (j+1)\n if a[starta:enda] == b[startb:endb]:\n if len(a[starta:enda]) > overlap: # if there is a bigger overlap then save it \n # print(\"overlap found\")\n overlap = len(a[starta:enda]) \n rconcat = a + b[endb:]\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # after checking for overlaps there may be 1 or no shortest common\n # superstrings stored in both lconcat and rconcat. Choose the shortest one if it exists\n # or the concatenation of a and b if there are no overlaps. We may have to make some\n # arbitrary choices here.\n\n if not lconcat and not rconcat: # both lconcat and rconcat are empty, no overlaps\n superstring = a + b # append b to a (could prepend here too, this is an arbitrary choice)\n elif lconcat and not rconcat: # lconcat contains overlap and rconcat is empty\n superstring = lconcat \n elif rconcat and not lconcat: # rconcat contains overlap and lconcat is empty\n superstring = rconcat\n elif rconcat and lconcat and (len(lconcat) <= len(rconcat)): # use lconcat if it is shorter or equal len to rconat\n superstring = lconcat\n elif rconcat and lconcat and (len(rconcat) < len(lconcat)): # use rconcat only if it is shorter than lconat\n superstring = rconcat\n return superstring",
"def insertion_sort_single_alpha(arr:Sequence[AlphaList]) -> AlphaList:\n lsi = 1\n while lsi <= len(arr)-1:\n insert_into = 0\n compare = arr[lsi]\n for idx in range(lsi,-1,-1):\n if ord(compare) > ord(arr[idx]):break\n insert_into = idx\n del arr[lsi]\n arr.insert(insert_into,compare)\n lsi += 1\n return arr",
"def stooge_sort(arr):\r\n stooge(arr, 0, len(arr) - 1)",
"def is_longer(dna1, dna2):\n return get_length(dna1) > get_length(dna2)",
"def find_first_after(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = np.zeros(len(a), dtype='int32')\n for i in range(len(a)):\n out_i = np.argmin(abs(b - a[i]))\n if b[out_i] < a[i]:\n out_i = out_i + 1\n out[i] = out_i\n return out",
"def _compare(a, b):\n a = _split(a)\n b = _split(b)\n if a[0] != b[0]:\n if a[0] > b[0]:\n return 1\n else:\n return -1\n max_len = max(len(a[1]), len(b[1]))\n for i in range(max_len):\n if i > len(b[1]):\n return 1\n elif i > len(a[1]):\n return -1\n schar = a[1][i]\n ochar = b[1][i]\n if schar > ochar:\n return 1\n elif schar < ochar:\n return -1",
"def less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)",
"def test_merge_list_empty_first(empty_ll, short_ll):\n assert ml(empty_ll, short_ll) == 8\n assert len(short_ll) == 4",
"def __cmp__(self, other):\n if self.length < other.length:\n return -1\n if self.length == other.length:\n return 0\n if self.length > other.length:\n return 1",
"def testLargerArray():\n arr = [7, 4, 9, 6, 3, 8, 1, 5, 2]\n sort(arr)\n expectedArr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert isEqual(arr, expectedArr)",
"def minDist(l, a, b):\n pre = 0\n rt = float('INF')\n for i in range(len(l)):\n if l[i] == a or l[i] == b:\n pre = i\n break\n\n for i in range(pre+1, len(l)):\n if l[i] == a or l[i] == b:\n if l[i] != l[pre] and i - pre < rt:\n rt = i - pre\n pre = i\n return rt",
"def smaller(s):\n if len(s) == 1:\n return s\n if s[0] > s[-1]:\n return smaller(s[1:])\n return smaller(s[:-1])",
"def handle_one_off(self, shorter, longer):\n found = False\n for n, c in enumerate(shorter):\n if shorter[n] == longer[n]:\n continue\n elif shorter[n] == longer[n+1]:\n if not found:\n found = True\n else:\n return False\n return True",
"def least_disruptive_subarray(a, s):\n assert len(s) <= len(a)\n\n s_sum = sum(s)\n a_sum = sum(a[i] for i in xrange(len(s)))\n disruption = abs(a_sum - s_sum)\n index = 0\n\n for i in xrange(len(s), len(a)):\n a_sum += (a[i] - a[i - len(s)])\n\n if abs(a_sum - s_sum) < disruption:\n index = i - len(s) + 1\n disruption = abs(a_sum - s_sum)\n\n return index",
"def __le__(self, other):\n try:\n return self.length2 <= other.length2\n except AttributeError:\n return assert_unorderable(self, other)",
"def find_last_before(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = np.zeros(len(a), dtype='int32')\n for i in range(len(a)):\n out_i = np.argmin(abs(b - a[i]))\n if b[out_i] > a[i]:\n out_i = out_i - 1\n out[i] = out_i\n return out",
"def sort_1(l):\n pass",
"def smallest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a < b, a, b)\r\n else:\r\n return min(stack(*args), axis=0)"
] |
[
"0.5934056",
"0.58563995",
"0.5855364",
"0.5842192",
"0.5823215",
"0.58195823",
"0.5754728",
"0.5728443",
"0.5691534",
"0.56854546",
"0.5677097",
"0.5663166",
"0.5659071",
"0.56569976",
"0.55985916",
"0.5598521",
"0.5582329",
"0.5577947",
"0.55678904",
"0.55565935",
"0.555032",
"0.5524918",
"0.54976755",
"0.54601276",
"0.54460365",
"0.5428791",
"0.5414355",
"0.5403018",
"0.5394291",
"0.537732"
] |
0.63655823
|
0
|
Takes obj_a and obj_b and combines them under a boolean union object. Returns the boolean object. Make sure that these objects aren't attached to a scene, as this function does not clone them.
|
def make_boole_union(obj_a, obj_b):
# If we're missing any objects, return
if obj_a is None or obj_b is None:
return
# Create a union type boolean object
boole = c4d.BaseObject(c4d.Oboole)
boole[c4d.BOOLEOBJECT_TYPE] = 0 # A Union B
# Put them inside the boole
obj_a.InsertUnder(boole)
obj_b.InsertUnder(boole)
# Rename the boolean object to: ObjA + ObjB
boole.SetName(obj_a.GetName() + " + " + obj_b.GetName())
# Return the boolean object
return boole
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)",
"def __or__(self, obj):\n return self._boolean_operation(obj, operator.__or__)",
"def and_(a, b):",
"def __and__(self, other):\n return intersect(self, other)",
"def _boolean_region(all_polygons_A, all_polygons_B,\n bboxes_A, bboxes_B,\n left, bottom, right, top,\n operation = 'and',\n precision = 1e-4):\n\n polygons_to_boolean_A = _crop_edge_polygons(all_polygons_A, bboxes_A,\n left, bottom, right, top,\n precision)\n polygons_to_boolean_B = _crop_edge_polygons(all_polygons_B, bboxes_B,\n left, bottom, right, top,\n precision)\n polygons_boolean = clipper.clip(polygons_to_boolean_A,\n polygons_to_boolean_B,\n operation, 1/precision)\n return polygons_boolean",
"def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:",
"def __mul__(self, obj):\n return self & obj",
"def __or__(self, other):\n return self.union(other)",
"def __or__(self, other):\n return self.union(other)",
"def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)",
"def singleboolean(self, context, wall, o):\n\n # generate holes for crossing window and doors\n self.itM = wall.matrix_world.inverted()\n d = self.datablock(o)\n\n hole = None\n hole_obj = None\n # default mode defined by __init__\n self.detect_mode(context, wall)\n\n if d is not None:\n if self.mode != 'ROBUST':\n hole = d.interactive_hole(context, o)\n else:\n hole = d.robust_hole(context, o.matrix_world)\n if hole is None:\n return\n\n hole.data.materials.clear()\n for mat in wall.data.materials:\n hole.data.materials.append(mat)\n\n self.prepare_hole(hole)\n\n if self.mode == 'INTERACTIVE':\n # update / remove / add boolean modifier\n self.difference(wall, hole)\n\n elif self.mode == 'HYBRID':\n m = wall.modifiers.get('AutoMixedBoolean')\n\n if m is None:\n m = wall.modifiers.new('AutoMixedBoolean', 'BOOLEAN')\n m.operation = 'DIFFERENCE'\n\n if m.object is None:\n hole_obj = self.create_merge_basis(context, wall)\n m.object = hole_obj\n else:\n hole_obj = m.object\n self.union(hole_obj, hole)\n\n bpy.ops.object.select_all(action='DESELECT')\n\n # parenting childs to wall reference point\n if wall.parent is None:\n x, y, z = wall.bound_box[0]\n context.scene.cursor_location = wall.matrix_world @ Vector((x, y, z))\n # fix issue #9\n context.view_layer.objects.active = wall\n bpy.ops.archipack.reference_point()\n else:\n context.view_layer.objects.active = wall.parent\n\n if hole_obj is not None:\n hole_obj.select_set(state=True)\n\n wall.select_set(state=True)\n o.select_set(state=True)\n bpy.ops.archipack.parent_to_reference()\n wall.select_set(state=True)\n context.view_layer.objects.active = wall\n if \"archipack_wall2\" in wall.data:\n d = wall.data.archipack_wall2[0]\n g = d.get_generator()\n d.setup_childs(wall, g)\n d.relocate_childs(context, wall, g)\n elif \"archipack_roof\" in wall.data:\n pass\n if hole_obj is not None:\n self.prepare_hole(hole_obj)",
"def __and__(self, other):\n return self.intersection(other)",
"def _iou(self, obj_a, obj_b):\n # compute the area of both the prediction and ground-truth\n # rectangles\n box_a_area = (obj_a[2] - obj_a[0] + 1) * (obj_a[3] - obj_a[1] + 1)\n box_b_area = (obj_b[2] - obj_b[0] + 1) * (obj_b[3] - obj_b[1] + 1)\n\n # determine the (x, y)-coordinates of the intersection rectangle\n x_a = max(obj_a[0], obj_b[0])\n y_a = max(obj_a[1], obj_b[1])\n x_b = min(obj_a[2], obj_b[2])\n y_b = min(obj_a[3], obj_b[3])\n\n # compute the area of intersection rectangle\n inter_area = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = inter_area / float(box_a_area + box_b_area - inter_area)\n\n # return the intersection over union value\n return iou",
"def merge_objects(\n object_a, object_b, easy_fields, difficult_fields, choices, base_a=True\n):\n if base_a:\n base_obj = object_a\n merging_obj = object_b\n else:\n base_obj = object_b\n merging_obj = object_a\n\n # used to catch all IntegrityErrors caused by violated database constraints\n # when adding two similar entries by the manager (see below for more\n # details)\n integrity_errors = []\n\n with transaction.atomic():\n for attr in easy_fields:\n value = choices.get(attr)\n if value == \"obj_a\":\n setattr(base_obj, attr, getattr(object_a, attr))\n elif value == \"obj_b\":\n setattr(base_obj, attr, getattr(object_b, attr))\n elif value == \"combine\":\n try:\n new_value = getattr(object_a, attr) + getattr(object_b, attr)\n setattr(base_obj, attr, new_value)\n except TypeError:\n # probably 'unsupported operand type', but we\n # can't do much about it…\n pass\n\n for attr in difficult_fields:\n if attr == \"comments\":\n # special case handled below the for-loop\n continue\n\n related_a = getattr(object_a, attr)\n related_b = getattr(object_b, attr)\n\n manager = getattr(base_obj, attr)\n value = choices.get(attr)\n\n # switch only if this is opposite object\n if value == \"obj_a\" and manager != related_a:\n if hasattr(manager, \"clear\"):\n # M2M and FK with `null=True` have `.clear()` method\n # which unassigns instead of removing the related objects\n manager.clear()\n else:\n # in some cases FK are strictly related with the instance\n # ie. they cannot be unassigned (`null=False`), so the\n # only sensible solution is to remove them\n manager.all().delete()\n manager.set(list(related_a.all()))\n\n elif value == \"obj_b\" and manager != related_b:\n if hasattr(manager, \"clear\"):\n # M2M and FK with `null=True` have `.clear()` method\n # which unassigns instead of removing the related objects\n manager.clear()\n else:\n # in some cases FK are strictly related with the instance\n # ie. 
they cannot be unassigned (`null=False`), so the\n # only sensible solution is to remove them\n manager.all().delete()\n manager.set(list(related_b.all()))\n\n elif value == \"obj_a\" and manager == related_a:\n # since we're keeping current values, try to remove (or clear\n # if possible) opposite (obj_b) - they may not be removable\n # via on_delete=CASCADE, so try manually\n if hasattr(related_b, \"clear\"):\n related_b.clear()\n else:\n related_b.all().delete()\n\n elif value == \"obj_b\" and manager == related_b:\n # since we're keeping current values, try to remove (or clear\n # if possible) opposite (obj_a) - they may not be removable\n # via on_delete=CASCADE, so try manually\n if hasattr(related_a, \"clear\"):\n related_a.clear()\n else:\n related_a.all().delete()\n\n elif value == \"combine\":\n to_add = None\n if attr == \"consent_set\":\n archive_least_recent_active_consents(object_a, object_b, base_obj)\n\n if manager == related_a:\n to_add = related_b.all()\n if manager == related_b:\n to_add = related_a.all()\n\n # Some entries may cause IntegrityError (violation of\n # uniqueness constraint) because they are duplicates *after*\n # being added by the manager.\n # In this case they must be removed to not cause\n # on_delete=PROTECT violation after merging\n # (merging_obj.delete()).\n for element in to_add:\n try:\n with transaction.atomic():\n manager.add(element)\n except IntegrityError:\n try:\n element.delete()\n except IntegrityError as e:\n integrity_errors.append(str(e))\n\n if \"comments\" in choices:\n value = choices[\"comments\"]\n # special case: comments made regarding these objects\n comments_a = Comment.objects.for_model(object_a)\n comments_b = Comment.objects.for_model(object_b)\n base_obj_ct = ContentType.objects.get_for_model(base_obj)\n\n if value == \"obj_a\":\n # we're keeping comments on obj_a, and removing (hiding)\n # comments on obj_b\n # WARNING: sequence of operations is important here!\n comments_b.update(is_removed=True)\n comments_a.update(\n content_type=base_obj_ct,\n object_pk=base_obj.pk,\n )\n\n elif value == \"obj_b\":\n # we're keeping comments on obj_b, and removing (hiding)\n # comments on obj_a\n # WARNING: sequence of operations is important here!\n comments_a.update(is_removed=True)\n comments_b.update(\n content_type=base_obj_ct,\n object_pk=base_obj.pk,\n )\n\n elif value == \"combine\":\n # we're making comments from either of the objects point to\n # the new base object\n comments_a.update(\n content_type=base_obj_ct,\n object_pk=base_obj.pk,\n )\n comments_b.update(\n content_type=base_obj_ct,\n object_pk=base_obj.pk,\n )\n\n merging_obj.delete()\n\n return base_obj.save(), integrity_errors",
"def fits(a, b):\n return all(x & y for x, y in zip(a, b))",
"def Common(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_Common(self, *args)",
"def combine_mask(mask_a, mask_b=None):\n if mask_b is None:\n return mask_a\n\n return np.logical_and(mask_a, mask_b)",
"def check_intersection(obj1, obj2):\n (x1, y1, w1, h1) = obj1.get_box()\n (x2, y2, w2, h2) = obj2.get_box()\n if x2 + w2 - 1 < x1 or x2 >= x1 + w1:\n return False\n if y2 + h2 - 1 < y1 or y2 >= y1 + h1:\n return False\n \n return True",
"def or_(a, b):",
"def _boolean_operation(self, obj, op):\n if isinstance(obj, Matrix):\n if self.m != obj.m or self.n != obj.n:\n raise exc.ComformabilityError(\n \"matrices must have the same dimensions\")\n if not isinstance(obj, BooleanMatrix):\n raise TypeError(\"operation only exists for Boolean matrices\")\n data = [[op(self[i, j], obj[i, j])\n for j in range(self.n)]\n for i in range(self.m)]\n elif isinstance(obj, bool):\n data = [[op(self[i, j], obj)\n for j in range(self.n)]\n for i in range(self.m)]\n else:\n raise TypeError(\n \"operation can't be performed with object of type \" +\n type(obj).__name__)\n return self.__class__(self.m, self.n, data)",
"def __and__(self, other):\n return self.and_(other)",
"def union(a, b):\n if a == b:\n return a\n elif not a:\n return b\n elif not b:\n return a\n a = Const.unwrap(a)\n b = Const.unwrap(b)\n # TODO(robertwb): Work this into the Union code in a more generic way.\n if type(a) == type(b) and element_type(a) == typehints.Union[()]:\n return b\n elif type(a) == type(b) and element_type(b) == typehints.Union[()]:\n return a\n return typehints.Union[a, b]",
"def logical_xor(a, b):\n return bool(a) ^ bool(b)",
"def fast_boolean(operandA,\n operandB,\n operation,\n precision=0.001,\n max_points=_max_points,\n layer=0,\n datatype=0):\n polyA = []\n polyB = []\n for poly, obj in zip((polyA, polyB), (operandA, operandB)):\n if isinstance(obj, PolygonSet):\n poly.extend(obj.polygons)\n elif isinstance(obj, CellReference) or isinstance(obj, CellArray):\n poly.extend(obj.get_polygons())\n elif obj is not None:\n for inobj in obj:\n if isinstance(inobj, PolygonSet):\n poly.extend(inobj.polygons)\n elif isinstance(inobj, CellReference) or isinstance(\n inobj, CellArray):\n poly.extend(inobj.get_polygons())\n else:\n poly.append(inobj)\n if len(polyB) == 0:\n polyB.append(polyA.pop())\n result = clipper.clip(polyA, polyB, operation, 1 / precision)\n return None if len(result) == 0 else PolygonSet(\n result, layer, datatype, verbose=False).fracture(\n max_points, precision)",
"def _mergeCalibBlocks_isMergeable(object1: Any, object2: Any) -> bool:\n if isinstance(object1, list):\n if not isinstance(object2, list):\n return False\n if len(object1) != len(object2):\n return False\n return all(\n _mergeCalibBlocks_isMergeable(elem1, elem2)\n for elem1, elem2 in zip(object1, object2)\n )\n\n if isinstance(object1, dict):\n if not isinstance(object2, dict):\n return False\n if set(object1.keys()) != set(object2.keys()):\n return False\n return all(\n _mergeCalibBlocks_isMergeable(object1[key], object2[key])\n for key in object1 if key != \"name\"\n )\n\n if isinstance(object1, str):\n if not isinstance(object2, str):\n return False\n if object1.startswith(\"arm=\") and object2.startswith(\"arm=\"):\n return True\n return object1 == object2\n\n return object1 == object2",
"def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)",
"def xor(a: bool, b: bool) -> bool:\n return (a and not b) or (not a and b)",
"def union(a, b):\r\n return list(set(a) | set(b))",
"def __iand__(self, other):\n self.truths = self.truths | other.truths\n return self",
"def __and__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x & y for x, y in zip(a, b)])"
] |
[
"0.6346084",
"0.6032152",
"0.57977915",
"0.5719726",
"0.5705259",
"0.5703416",
"0.5640378",
"0.5629587",
"0.5629587",
"0.5621318",
"0.5610552",
"0.5575097",
"0.5558348",
"0.5542803",
"0.55349076",
"0.552503",
"0.5497892",
"0.5482005",
"0.54481745",
"0.5441582",
"0.54183906",
"0.5407479",
"0.5403746",
"0.53991455",
"0.53940916",
"0.5390144",
"0.5384198",
"0.53813255",
"0.5379896",
"0.5379444"
] |
0.7978053
|
0
|
Returns the coordinates of the origin bounding box that are intersected by the intersect bounding box. >>> bounding_box = 10, 100, 30, 110 >>> other_bbox = 20, 100, 40, 105 >>> bounding_box_intersection(bounding_box, other_bbox) (10, 0, 20, 5) >>> bounding_box_intersection(other_bbox, bounding_box) (0, 0, 10, 5) >>> containing_bbox = 4, 55, 44, 115 >>> bounding_box_intersection(bounding_box, containing_bbox) (0, 0, 20, 10) >>> contained_bbox = 12, 102, 22, 108 >>> bounding_box_intersection(bounding_box, contained_bbox) (2, 2, 12, 8) >>> non_overlapping_bbox = 0, 0, 3, 3 >>> bounding_box_intersection(bounding_box, non_overlapping_bbox) is None True
|
def bounding_box_intersection(origin: Tuple[int, int, int, int], intersect: Tuple[int, int, int, int]) \
-> Optional[Tuple[int, int, int, int]]:
o_t, o_l, o_b, o_r = origin
t, l, b, r = intersect
out_top = max(t, o_t)
out_left = max(l, o_l)
out_bottom = min(b, o_b)
out_right = min(r, o_r)
if (out_top < out_bottom) and (out_left < out_right):
return out_top - o_t, \
out_left - o_l, \
out_bottom - o_t, \
out_right - o_l
else:
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def intersection(box0, box1):\n if isinstance(box0, bounding_box_pb2.BoundingBox):\n box0 = BoundingBox(box0.start, box0.size)\n if isinstance(box1, bounding_box_pb2.BoundingBox):\n box1 = BoundingBox(box1.start, box1.size)\n if not isinstance(box0, BoundingBox):\n raise ValueError('box0 must be a BoundingBox')\n if not isinstance(box0, BoundingBox):\n raise ValueError('box1 must be a BoundingBox')\n start = np.maximum(box0.start, box1.start)\n end = np.minimum(box0.end, box1.end)\n if np.any(end <= start): return None\n return BoundingBox(start=start, end=end)",
"def test_bounding_box_intersection(self):\n\n west_java = [105, -7, 108, -5]\n jakarta = [106.5, -6.5, 107, -6]\n\n # Test commutative law\n assert numpy.allclose(bbox_intersection(west_java, jakarta),\n bbox_intersection(jakarta, west_java))\n\n # Test inclusion\n assert numpy.allclose(bbox_intersection(west_java, jakarta), jakarta)\n\n # Ignore Bounding Boxes that are None\n assert numpy.allclose(bbox_intersection(west_java, jakarta, None),\n jakarta)\n\n # Realistic ones\n bbox1 = [94.972335, -11.009721, 141.014, 6.073612333333]\n bbox2 = [105.3, -8.5, 110.0, -5.5]\n bbox3 = [105.6, -7.8, 110.5, -5.1]\n\n ref1 = [max(bbox1[0], bbox2[0]),\n max(bbox1[1], bbox2[1]),\n min(bbox1[2], bbox2[2]),\n min(bbox1[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), ref1)\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), bbox2)\n\n ref2 = [max(bbox3[0], bbox2[0]),\n max(bbox3[1], bbox2[1]),\n min(bbox3[2], bbox2[2]),\n min(bbox3[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox3, bbox2), ref2)\n assert numpy.allclose(bbox_intersection(bbox2, bbox3), ref2)\n\n # Multiple boxes\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3),\n bbox_intersection(ref1, ref2))\n\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3,\n west_java, jakarta),\n jakarta)\n\n # From actual example\n b1 = [94.972335000000001, -11.009721000000001,\n 141.014002, 6.0736119999999998]\n b2 = (95.059660952000002, -10.997409961000001,\n 141.00132578099999, 5.9109226959999983)\n b3 = (94.972335000000001, -11.009721000000001,\n 141.0140016666665, 6.0736123333332639)\n\n res = bbox_intersection(b1, b2, b3)\n assert numpy.allclose(res, [95.059660952, -10.997409961,\n 141.001325781, 5.910922695999998],\n rtol=1.0e-12, atol=1.0e-12)\n\n # Empty intersection should return None\n assert bbox_intersection(bbox2, [50, 2, 53, 4]) is None\n\n # Deal with invalid boxes\n try:\n bbox_intersection(bbox1, [53, 2, 40, 4])\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, [50, 7, 53, 4])\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, 'blko ho skrle')\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1)\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection('')\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection()\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)",
"def bbox_overlap(bbox_a, bbox_b):\n ymin_a, xmin_a, ymax_a, xmax_a = bbox_a\n ymin_b, xmin_b, ymax_b, xmax_b = bbox_b\n\n x_intersection = min(xmax_a, xmax_b) - max(xmin_a, xmin_b) + 1\n y_intersection = min(ymax_a, ymax_b) - max(ymin_a, ymin_b) + 1\n\n if x_intersection <= 0 or y_intersection <= 0:\n return 0\n else:\n return x_intersection * y_intersection",
"def intersection(self, other: 'BoundingBox2D') -> Optional['BoundingBox2D']:\n top_left_point = max(self.xmin, other.xmin), max(self.ymin, other.ymin)\n bottom_right_point = min(self.xmax, other.xmax), min(self.ymax, other.ymax)\n\n bbox = None\n try:\n bbox = BoundingBox2D.from_points(top_left_point, bottom_right_point)\n if bbox.is_null:\n bbox = None\n except ValueError:\n pass\n finally:\n return bbox",
"def intersect(box_a, box_b):\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]",
"def iou_bbox(bboxes1, bboxes2):\n bboxes1 = np.array(bboxes1, np.float32)\n bboxes2 = np.array(bboxes2, np.float32)\n \n intersection_min_y = np.maximum(bboxes1[:, 0], bboxes2[:, 0])\n intersection_max_y = np.minimum(bboxes1[:, 0] + bboxes1[:, 2] - 1, bboxes2[:, 0] + bboxes2[:, 2] - 1)\n intersection_height = np.maximum(intersection_max_y - intersection_min_y + 1, np.zeros_like(bboxes1[:, 0]))\n\n intersection_min_x = np.maximum(bboxes1[:, 1], bboxes2[:, 1])\n intersection_max_x = np.minimum(bboxes1[:, 1] + bboxes1[:, 3] - 1, bboxes2[:, 1] + bboxes2[:, 3] - 1)\n intersection_width = np.maximum(intersection_max_x - intersection_min_x + 1, np.zeros_like(bboxes1[:, 1]))\n\n area_intersection = intersection_height * intersection_width\n area_first = bboxes1[:, 2] * bboxes1[:, 3]\n area_second = bboxes2[:, 2] * bboxes2[:, 3]\n area_union = area_first + area_second - area_intersection\n \n iou = area_intersection * 1.0 / area_union\n iof = area_intersection * 1.0 / area_first\n ios = area_intersection * 1.0 / area_second\n\n return iou, iof, ios",
"def intersection_over_union(self, other_box):\n \n offset_x = (min(self.x_max, other_box.x_max) \n - max(self.x_min, other_box.x_min))\n offset_y = (min(self.y_max, other_box.y_max) \n - max(self.y_min, other_box.y_min))\n intersection_width = max(0, offset_x)\n intersection_height = max(0, offset_y)\n intersection_area = intersection_width * intersection_height\n union_area = self.get_area() + other_box.get_area() - intersection_area\n return intersection_area / union_area",
"def intersect(box_a, box_b):\n #taken from fastai\n max_xy = torch.min(box_a[:, None, 2:], box_b[None, :, 2:])\n min_xy = torch.max(box_a[:, None, :2], box_b[None, :, :2])\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]",
"def bboxes_intersection(bboxes_ref, bboxes2):\n bboxes_ref = np.transpose(bboxes_ref)\n bboxes2 = np.transpose(bboxes2)\n # Intersection bbox and volume.\n int_ymin = np.maximum(bboxes_ref[0], bboxes2[0])\n int_xmin = np.maximum(bboxes_ref[1], bboxes2[1])\n int_ymax = np.minimum(bboxes_ref[2], bboxes2[2])\n int_xmax = np.minimum(bboxes_ref[3], bboxes2[3])\n\n int_h = np.maximum(int_ymax - int_ymin, 0.)\n int_w = np.maximum(int_xmax - int_xmin, 0.)\n int_vol = int_h * int_w\n # Union volume.\n vol = (bboxes_ref[2] - bboxes_ref[0]) * (bboxes_ref[3] - bboxes_ref[1])\n score = int_vol / vol\n return score",
"def rectangle_overlap(a, b):\n # Decompose the coordinates\n a_x0, a_y0, a_x1, a_y1 = a\n b_x0, b_y0, b_x1, b_y1 = b\n\n if a_x1 < b_x0 or b_x1 < a_x0 or a_y1 < b_y0 or b_y1 < a_y0:\n # No intersection\n return 0, None\n else:\n x0, y0, x1, y1 = get_overlap_rectangle(a, b, relative=True)\n width = x1 - x0\n height = y1 - y0\n return (width * height, (x0, y0, x1, y1))",
"def containing(*boxes):\n if not boxes:\n raise ValueError('At least one bounding box must be specified')\n boxes_objs = map(BoundingBox, boxes)\n start = boxes_objs[0].start\n end = boxes_objs[0].end\n for box in boxes_objs[1:]:\n start = np.minimum(start, box.start)\n end = np.maximum(end, box.end)\n return BoundingBox(start=start, end=end)",
"def intersection(self, other):\n VERIFICATION.verify_type(other, Rect, \"intersection target must be Rect\")\n\n funcs = (max, min, max, min)\n intersection_tuple = self._apply_binary_funcs(other, funcs)\n\n (inter_row_start, inter_row_end, inter_col_start, inter_col_end) = intersection_tuple\n if inter_row_start >= inter_row_end or inter_col_start >= inter_col_end:\n return None\n\n return Rect(*intersection_tuple)",
"def boundingbox(self, *args, **kwargs):\n return _image.image_boundingbox(self, *args, **kwargs)",
"def intersect(box_a, box_b):\n\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1), box_b[:, 2:].unsqueeze(0))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1), box_b[:, :2].unsqueeze(0))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]",
"def bboxes_intersection(bbox_ref, bboxes, name=None):\n with tf.name_scope(name, 'bboxes_intersection'):\n # Should be more efficient to first transpose.\n bboxes = tf.transpose(bboxes)\n bbox_ref = tf.transpose(bbox_ref)\n # Intersection bbox and volume.\n int_ymin = tf.maximum(bboxes[0], bbox_ref[0])\n int_xmin = tf.maximum(bboxes[1], bbox_ref[1])\n int_ymax = tf.minimum(bboxes[2], bbox_ref[2])\n int_xmax = tf.minimum(bboxes[3], bbox_ref[3])\n h = tf.maximum(int_ymax - int_ymin, 0.)\n w = tf.maximum(int_xmax - int_xmin, 0.)\n # Volumes.\n inter_vol = h * w\n bboxes_vol = (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1])\n #scores = tfe_math.safe_divide(inter_vol, bboxes_vol, 'intersection')\n scores = inter_vol / bboxes_vol\n return scores",
"def bboxes_intersect(bboxes_ref, bboxes2):\n intersect_bboxes = np.zeros_like(bboxes2)\n\n bboxes_ref = np.transpose(bboxes_ref)\n bboxes2 = np.transpose(bboxes2)\n\n # Intersection bbox and volume.\n intersect_bboxes[:, 0] = np.maximum(bboxes_ref[0], bboxes2[0])\n intersect_bboxes[:, 1] = np.maximum(bboxes_ref[1], bboxes2[1])\n intersect_bboxes[:, 2] = np.minimum(bboxes_ref[2], bboxes2[2])\n intersect_bboxes[:, 3] = np.minimum(bboxes_ref[3], bboxes2[3])\n\n return intersect_bboxes",
"def intersection(boxes1, boxes2):\n ymin1, xmin1, ymax1, xmax1 = np.split(boxes1, 4, axis=1)\n ymin2, xmin2, ymax2, xmax2 = np.split(boxes2, 4, axis=1)\n\n pairwise_min_ymax = np.minimum(ymax1, np.transpose(ymax2))\n pairwise_max_ymin = np.maximum(ymin1, np.transpose(ymin2))\n intersect_heights = np.maximum(\n# np.zeros(pairwise_max_ymin.shape),\n 0.0,\n pairwise_min_ymax - pairwise_max_ymin)\n\n\n pairwise_min_xmax = np.minimum(xmax1, np.transpose(xmax2))\n pairwise_max_xmin = np.maximum(xmin1, np.transpose(xmin2))\n intersect_widths = np.maximum(\n# np.zeros(pairwise_max_xmin.shape),\n 0.0,\n pairwise_min_xmax - pairwise_max_xmin)\n return intersect_heights * intersect_widths",
"def intersection(boxes1: np.array, boxes2: np.array) -> np.array:\n inter_coords = np.clip(\n np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) - np.maximum(boxes1[:, None, :2], boxes2[:, :2]),\n a_min=0,\n a_max=None\n )\n\n return np.prod(inter_coords, 2)",
"def bbox_iou(box1, box2, x1y1x2y2=True):\r\n if x1y1x2y2:\r\n # Get the coordinates of bounding boxes\r\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\r\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\r\n else:\r\n # Transform from center and width to exact coordinates\r\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\r\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\r\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\r\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\r\n\r\n # get the coordinates of the intersection rectangle\r\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\r\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\r\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\r\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\r\n # Intersection area\r\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)\r\n # Union Area\r\n b1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)\r\n b2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)\r\n\r\n # print(box1, box1.shape)\r\n # print(box2, box2.shape)\r\n return inter_area / (b1_area + b2_area - inter_area + 1e-16)",
"def intersection_over_union_from_boxes(boxA, boxB):\n\n\t# determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = max(0, xB - xA + 0) * max(0, yB - yA + 0)\n\n # compute the area of both the prediction and ground-truth rectangles\n boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n return iou",
"def get_bbox_overlap(self, that, epsilon):\n if (not isinstance(that, Annotation)):\n raise ValueError(\"Argument for intersects should be an annotation\")\n\n # find the width and height of the overlapping rectangle\n width = min(self.bbox.xmax, that.bbox.xmax) - \\\n max(self.bbox.xmin, that.bbox.xmin)\n height = min(self.bbox.ymax, that.bbox.ymax) - \\\n max(self.bbox.ymin, that.bbox.ymin)\n\n height = abs(that.bbox.ymax - self.bbox.ymin) + epsilon\n\n return (width, height)",
"def doBoundingBoxesIntersect(self, other):\n if(self.upperLeft.x <= other.lowerRight.x and\n self.lowerRight.x >= other.upperLeft.x and\n self.upperLeft.y >= other.lowerRight.y and\n self.lowerRight.y <= other.upperLeft.y):\n return True\n return False",
"def intersection(boxes1, boxes2):\n y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=-1)\n y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=-1)\n\n all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))\n all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))\n\n intersect_heights = np.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)\n all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))\n all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))\n\n intersect_widths = np.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)\n\n return intersect_heights * intersect_widths",
"def bbox_intersect(a_ary, b_ary):\r\n # Do any of the 4 corners of one bbox lie inside the other bbox?\r\n # bbox format of [ll, ur]\r\n # bbx[0] is lower left\r\n # bbx[1] is upper right\r\n # bbx[0][0] is lower left longitude\r\n # bbx[0][1] is lower left latitude\r\n # bbx[1][0] is upper right longitude\r\n # bbx[1][1] is upper right latitude\r\n\r\n # Detect longitude and latitude overlap\r\n if is_overlap_sorted_values(a_ary[0][0], a_ary[1][0], b_ary[0][0], b_ary[1][0]) \\\r\n and is_overlap_sorted_values(a_ary[0][1], a_ary[1][1], b_ary[0][1], b_ary[1][1]):\r\n return True\r\n else:\r\n return False",
"def has_intersection(bboxa, bboxb):\n yamin, xamin, ha, wa = bboxa\n ybmin, xbmin, hb, wb = bboxb\n yamax, xamax = yamin+ha, xamin+wa\n ybmax, xbmax = ybmin+hb, xbmin+wb\n xs, ys = max(xamin, xbmin), max(yamin, ybmin)\n xe, ye = min(xamax, xbmax), min(yamax, ybmax)\n return xe > xs and ye > ys",
"def get_overlap_box(box1, box2):\n xmax = np.minimum(box1[2], box2[2])\n xmin = np.maximum(box1[0], box2[0])\n ymax = np.minimum(box1[3], box2[3])\n ymin = np.maximum(box1[1], box2[1])\n\n return np.array([xmin, ymin, xmax, ymax])",
"def intersection(boxes1, boxes2):\n [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)\n [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)\n\n all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))\n all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))\n intersect_heights = np.maximum(\n np.zeros(all_pairs_max_ymin.shape),\n all_pairs_min_ymax - all_pairs_max_ymin,\n )\n all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))\n all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))\n intersect_widths = np.maximum(\n np.zeros(all_pairs_max_xmin.shape),\n all_pairs_min_xmax - all_pairs_max_xmin,\n )\n return intersect_heights * intersect_widths",
"def intersection(self, rect1, rect2):\n x_overlap = max(0, min(rect1[2], rect2[2]) - max(rect1[0], rect2[0]));\n y_overlap = max(0, min(rect1[3], rect2[3]) - max(rect1[1], rect2[1]));\n overlapArea = x_overlap * y_overlap;\n return overlapArea",
"def compute_intersection(boxes1: Type[Union[Tensor, np.ndarray]],\n boxes2: Type[Union[Tensor, np.ndarray]]):\n if isinstance(boxes1, Tensor):\n return intersection_pt(boxes1, boxes2)\n return intersection_np(boxes1, boxes2)",
"def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])"
] |
[
"0.73368526",
"0.68448746",
"0.6794572",
"0.6752013",
"0.6524481",
"0.6365032",
"0.6349253",
"0.6339419",
"0.63276106",
"0.6230844",
"0.6219065",
"0.6181393",
"0.6178924",
"0.6171056",
"0.6161214",
"0.615606",
"0.61267775",
"0.61151683",
"0.60859907",
"0.607529",
"0.60643137",
"0.6048354",
"0.60440993",
"0.6040687",
"0.60099125",
"0.5991811",
"0.59698194",
"0.5965636",
"0.59275764",
"0.5916023"
] |
0.7157741
|
1
|
Aligns prediction Nodes to truth.
|
def align_nodes(truth: List[Node], prediction: List[Node], fscore=None) -> List[Tuple[int, int]]:
if fscore is None:
_, _, fscore = compute_recall_precision_fscore(truth, prediction)
# For each prediction (column), pick the highest-scoring
# True symbol.
closest_truths = list(fscore.argmax(axis=0))
# Checking for duplicate "best ground truth" alignments.
# This does *not* check for duplicates in the sense
# "multiple predictions aligned to the same truth"
# or "multiple truths aligned to the same prediction",
# it only acts as a tie-breaker in case one prediction overlaps
# to the same degree multiple truth objects (e.g. a single sharp
# in a key signature).
closest_truth_distance = [fscore[ct, j]
for j, ct in enumerate(closest_truths)]
equidistant_closest_truths = [[i for i, x in enumerate(fscore[:, j])
if x == ct]
for j, ct in enumerate(closest_truth_distance)]
class_name_aware_closest_truths = []
for j, ects in enumerate(equidistant_closest_truths):
best_truth_i = int(ects[0])
# If there is more than one tied best choice,
# try to choose the truth Node that has the same
# class as the predicted Node.
if len(ects) > 1:
ects_c = {truth[int(i)].class_name: i for i in ects}
j_class_name = prediction[j].class_name
if j_class_name in ects_c:
best_truth_i = int(ects_c[j_class_name])
class_name_aware_closest_truths.append(best_truth_i)
alignment = [(t, p) for p, t in enumerate(class_name_aware_closest_truths)]
return alignment
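
A hedged usage sketch follows. Node and compute_recall_precision_fscore belong to the surrounding library and are not shown here, so the dataclass below is only an illustrative stand-in (in a real run these names, plus List and Tuple, must already be importable when align_nodes is defined); passing a precomputed fscore matrix sidesteps the scoring helper entirely.

from dataclasses import dataclass
from typing import List, Tuple  # needed for align_nodes' annotations

import numpy as np


@dataclass
class Node:  # stand-in: only class_name is used by the tie-breaker
    class_name: str


truth = [Node("notehead"), Node("sharp"), Node("notehead")]
prediction = [Node("sharp"), Node("notehead")]

# Rows index truth, columns index predictions. Passing fscore directly
# means compute_recall_precision_fscore is not needed for this sketch.
fscore = np.array([[0.9, 0.1],
                   [0.9, 0.0],
                   [0.2, 0.8]])

# Column 0 ties between truth rows 0 and 1; the class-aware tie-break picks
# row 1 because its class ("sharp") matches the prediction.
print(align_nodes(truth, prediction, fscore=fscore))  # [(1, 0), (2, 1)]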
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def define_prediction(self):\n with self.graph.as_default():\n self.predict_label = tf.argmax(self.l7, 1)\n self.predict_prob = tf.nn.softmax(self.l7)\n self.correct_label = tf.argmax(self.label, 1)\n self.accuracy = tf.reduce_mean(\n tf.cast(\n tf.equal(\n self.predict_label,\n self.correct_label\n ),\n tf.float32\n )\n )\n\n # Aggiungo accuracy all'elenco del sommario per tensorboard\n tf.summary.scalar(\"accuracy\", self.accuracy)\n return self.accuracy, self.predict_label",
"def update_state(self, prediction_begin, prediction_end, label_begin,\n label_end):\n tp = math_ops.cast(\n calculate_true_positive(prediction_begin, prediction_end, label_begin,\n label_end), dtypes.float32)\n num_pred = math_ops.cast(\n ragged_array_ops.size(prediction_begin), dtypes.float32)\n num_gold = math_ops.cast(ragged_array_ops.size(label_begin), dtypes.float32)\n fp = num_pred - tp\n fn = num_gold - tp\n self.true_positive.assign_add(tp)\n self.false_positive.assign_add(fp)\n self.false_negative.assign_add(fn)",
"def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true",
"def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true",
"def predict(self):\n train_array = np.array(self.labels != 0, dtype=float)\n if not self.ising:\n labels_logit = self.ising_weights['vertices']\n else:\n neigh_num = self.adj.dot(train_array)\n neigh_num = np.where(neigh_num == 0, 1, neigh_num)\n neigh_weights = self.ising_weights['edges'] * self.labels\n labels_logit = (np.multiply(neigh_weights, neigh_num**(-1))\n + self.ising_weights['vertices'])\n self.prediction = np.where(labels_logit > 0, 1, -1)\n return self",
"def calculate_link_prediction_score(self):\n calculate_method = (\n self.calculate_score_persona\n if self.is_persona_emb\n else self.calculate_score\n )\n self.link_prediction_score_positive = np.array(\n calculate_method(self.test_edges)\n )\n self.link_prediction_score_negative = np.array(\n calculate_method(self.negative_edges)\n )",
"def prediction_a_all(self):\n return self._prediction_a_all",
"def add_signal_align_predictions(self):\n # TODO call signalalign if not called\n sa_events = self.get_signalalign_events()\n # cut out duplicates\n sa_events = np.unique(sa_events)\n events = self.get_resegment_basecall()\n predictions = match_events_with_signalalign(sa_events=sa_events, event_detections=events)\n # rna reference positions are on 5' edge aka right side of kmer\n if self.rna:\n predictions[\"reference_index\"] -= self.kmer_index\n else:\n predictions[\"reference_index\"] += self.kmer_index\n self.aligned_signal.add_label(predictions, name=\"full_signalalign\", label_type='prediction')\n return True",
"def check_prediction(self):\n predicted_scores = self.sess.run(self.NET.output_with_relu, feed_dict={self.NET.input: self.test_image if len(self.test_image.shape)==4 else [self.test_image]})\n self.original_confidence = np.max(predicted_scores)\n if np.argmax(predicted_scores,1) != self.original_label:\n print(\"Network's Prediction is Already Incorrect!\")\n return True\n else:\n return False",
"def _make_prediction(self):\n for i in range(3):\n self.prediction += random.choice(['0', '1'])\n\n for i in range(len(self.test_string) - 3):\n triad = self.test_string[i] + self.test_string[i + 1] + self.test_string[i + 2]\n self.prediction += predict_next(triad, self.triad_counts)\n\n return",
"def transform(self, net, data, prediction, truth):\n raw_output = prediction['output']\n # raw_output = tf.constant(np.load('out.npy'))\n batch = util.shape(raw_output)[0]\n prediction = tf.reshape(\n raw_output,\n (batch, self.num_cells, self.num_cells,\n self.num_anchors, 4 + 1 + self.num_classes))\n box_predict, obj_predict, class_predict = tf.split(\n prediction, [4, 1, self.num_classes], axis=-1)\n obj_predict = tf.nn.sigmoid(obj_predict)\n obj_predict_squeeze = tf.squeeze(obj_predict, -1)\n xy_original, wh_original = tf.split(box_predict, [2, 2], axis=-1)\n xy_predict, wh_predict = tf.sigmoid(xy_original), tf.exp(wh_original)\n prediction = {\n 'raw': raw_output,\n 'object': obj_predict_squeeze,\n 'object_mask': obj_predict,\n 'coordinate': [xy_predict, wh_original],\n 'outbox': self._cell_to_global(xy_predict, wh_predict),\n 'class': tf.nn.softmax(class_predict),\n }\n inputs = (\n prediction['object_mask'], prediction['class'],\n prediction['outbox'])\n corner_test, score_test, class_test, count_test = map_fn(\n self._filter, inputs,\n dtype=(tf.float32, tf.float32, tf.int32, tf.int32))\n prediction['test'] = {\n 'corner': corner_test,\n 'class': class_test,\n 'score': score_test,\n 'count': count_test,\n }\n # the original box and label values from the dataset\n image = data['input']\n if truth:\n rawlabel, truebox, count = truth\n rawlabel += self.label_offset\n truebox = util.corners_to_box(truebox)\n obj, box, label = map_fn(\n self._truth_to_cell, (truebox, rawlabel, count),\n dtype=(tf.float32, tf.float32, tf.int32))\n truth = {\n 'object': obj,\n 'object_mask': tf.expand_dims(obj, -1),\n 'box': box,\n 'class': slim.one_hot_encoding(label, self.num_classes),\n 'count': count,\n 'rawbox': truebox,\n 'rawclass': rawlabel,\n }\n return image, prediction, truth",
"def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class",
"def test_prediction(self):\n learner, held_out_student_idx = self.set_up_learner(True, False, 1e9)\n\n # store all node's data and reference IDs for CPDs and param_node dicts\n orig_datas = {key: node.data for key, node in learner.nodes.iteritems()}\n orig_cpd_ids = {key: id(node.cpd) for key, node in learner.nodes.iteritems()}\n orig_param_node_ids = {key: id(node.param_nodes)\n for key, node in learner.nodes.iteritems()}\n orig_fields = {}\n for field in ('callback', 'max_iterations', 'log_posterior', 'iter'):\n orig_fields[field] = getattr(learner, field)\n\n prob_correct = get_online_rps(learner, held_out_student_idx, max_iterations=1000)\n\n # get the test node with all the appended test responses\n test_correct = learner.nodes[TEST_RESPONSES_KEY].data\n\n valid_idx = np.isfinite(prob_correct)\n num_nan_rp = len(prob_correct) - np.sum(valid_idx)\n # check that number of NaN RPs equals total number of students\n self.assertEqual(num_nan_rp, len(np.unique(held_out_student_idx)))\n online_pc = Metrics.online_perc_correct(test_correct, held_out_student_idx)\n np.testing.assert_array_almost_equal(prob_correct[valid_idx], online_pc[valid_idx],\n decimal=6)\n\n # test that the original quantities are not modified\n for key in orig_datas:\n self.assertTrue(learner.nodes[key].data is orig_datas[key])\n np.testing.assert_array_equal(learner.nodes[key].data, orig_datas[key])\n self.assertTrue(id(learner.nodes[key].cpd) == orig_cpd_ids[key])\n self.assertTrue(id(learner.nodes[key].param_nodes) == orig_param_node_ids[key])\n for field, value in orig_fields.iteritems():\n self.assertTrue(getattr(learner, field) is value)\n\n # test that running online prediction again yields the same result; this time modify learner\n prob_correct_mod = get_online_rps(learner, held_out_student_idx, max_iterations=1000,\n copy_learner=False)\n np.testing.assert_equal(prob_correct_mod, prob_correct)\n # original responses should not have been modified, but thetas should have been\n for key in orig_datas:\n if key == THETAS_KEY:\n self.assertFalse(learner.nodes[key].data is orig_datas[key])\n else:\n self.assertTrue(learner.nodes[key].data is orig_datas[key])\n self.assertTrue(id(learner.nodes[key].cpd) == orig_cpd_ids[key])\n self.assertTrue(id(learner.nodes[key].param_nodes) == orig_param_node_ids[key])",
"def batch_predict(tree_adj, training_signs, edge_weight):\n # since shazoo use the revealed signs as-is, it's ok to use the same name\n training_signs, l2_values, rta_signs = training_signs\n all_nodes_to_predict = set(tree_adj) - set(training_signs)\n logging.debug('batch_predict has %d nodes to predict', len(all_nodes_to_predict))\n methods = ['l2cost', 'rta', 'shazoo']\n # fields are current_closest_hinge, current_sign, current_dst_to_closest_hinge\n node_predictions = {m: defaultdict(lambda: (None, None, 2e9)) for m in methods}\n hinge_value = {m: {} for m in methods}\n total_iter = 0\n while all_nodes_to_predict:\n some_root_of_a_border_tree = next(iter(all_nodes_to_predict))\n hinge_nodes, border_tree_nodes = find_hinge_nodes(tree_adj, edge_weight, training_signs,\n some_root_of_a_border_tree,\n with_visited=True)\n unmarked = border_tree_nodes - hinge_nodes\n for u in hinge_nodes:\n if u in hinge_value['shazoo']:\n continue\n vals, _, status = flep(tree_adj, (training_signs, rta_signs), edge_weight, u)\n hinge_value['shazoo'][u] = sgn(vals[0])\n hinge_value['rta'][u] = sgn(vals[1])\n if not USE_SCIPY:\n continue\n border_tree = build_border_tree_from_mincut_run(status, edge_weight)\n _, E, El, leaves_sign, _, _ = border_tree\n L = {u: l2_values[u] for u in leaves_sign}\n mapped_E, mapped_El_L, mapping = preprocess_edge_and_leaves(E, El, L)\n val = solve_by_zeroing_derivative(mapped_E, mapped_El_L, mapping, L,\n reorder=False)[0][u]\n hinge_value['l2cost'][u] = sgn(val)\n predicted_in_that_border_tree = set()\n inner_iter = 0\n # to avoid the same fork being picked again and again\n unmarked.add(some_root_of_a_border_tree)\n while unmarked:\n one_to_predict = next(iter(unmarked))\n hinge_tree = get_hinge_tree(one_to_predict, tree_adj, hinge_nodes)\n other_predicted = set()\n for h, h_val in iteritems(hinge_value['shazoo']):\n if h not in hinge_tree:\n continue\n predicted = propagate_hinge(hinge_tree, h, h_val, node_predictions['shazoo'],\n edge_weight)\n for u in predicted:\n prediction_info = node_predictions['shazoo'][u]\n used_hinge = prediction_info[0]\n node_predictions['rta'][u] = (used_hinge, hinge_value['rta'][used_hinge],\n prediction_info[2])\n if not USE_SCIPY:\n continue\n node_predictions['l2cost'][u] = (used_hinge, hinge_value['l2cost'][used_hinge],\n prediction_info[2])\n other_predicted.update(predicted)\n predicted_in_that_border_tree.update(other_predicted)\n unmarked -= other_predicted\n inner_iter += 1\n if inner_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the inner loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the inner loop')\n all_nodes_to_predict -= predicted_in_that_border_tree\n total_iter += 1\n if total_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the outer loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the outer loop')\n logging.debug('batch_predict has actually predicted %d nodes', len(node_predictions) - len(training_signs))\n return {m: {u: v[1] for u, v in iteritems(node_predictions[m]) if u not in training_signs}\n for m in methods}",
"def predict_only(self):",
"def make_predictions(self):\n \n self.Y = self.X.dot(self.w)",
"def accuracy(prediction_node, target_node, name):\n prediction_positive = tf.less(0.5, prediction_node)\n target_positive = tf.less(0.5, target_node)\n correct = tf.equal(prediction_positive, target_positive)\n correct_floats = tf.cast(correct, tf.float32)\n return tf.reduce_mean(correct_floats, name=name)",
"def output_to_prediction(darknet_output):\n darknet_output = tf.reshape(darknet_output, [-1, 13, 13, 5, 85])\n\n xy_offset = tf.nn.sigmoid(darknet_output[..., 0:2]) # 中心坐标相对于该cell左上角的偏移量,sigmoid函数归一化到0-1\n\n wh_offset = tf.exp(darknet_output[..., 2:4]) # 相对于anchor的wh比例,通过e指数解码\n\n obj_probs = tf.nn.sigmoid(darknet_output[..., 4:5]) # 置信度,sigmoid函数归一化到0-1\n\n class_probs = tf.nn.softmax(darknet_output[..., 5:]) # 网络回归的是'得分',用softmax转变成类别概率\n\n prediction = tf.concat([xy_offset, wh_offset, obj_probs, class_probs], axis=-1)\n return prediction",
"def setPrediction(self,pred):\n self.prediction = pred",
"def align_targets(predictions, targets):\n if (getattr(predictions, 'broadcastable', None) == (False, True) and\n getattr(targets, 'ndim', None) == 1):\n targets = as_theano_expression(targets).dimshuffle(0, 'x')\n return predictions, targets",
"def nnPredict(w1,w2,data):\n \n labels = np.array([])\n \n # create bias row\n bias_row =np.ones((np.size(data,0),1))\n \n # concatenate bias with data matrix\n data=np.concatenate((data,bias_row),axis=1)\n \n #Calculate input to hidden layer\n intput_hidden_layer= np.dot(data,w1.transpose()) \n \n #Calculate output of hidden layer using sigmoid function\n output_hidden_layer= sigmoid(intput_hidden_layer)\n \n #Calculate input to output nodes\n input_with_bias = np.concatenate((output_hidden_layer,bias_row),axis=1) \n input_output_node= np.dot(input_with_bias,w2.transpose()) \n \n # Calculate output of output layer\n output_layer= sigmoid(input_output_node) \n \n # get index of maximum from all rows in ouput layer matrix\n labels = np.argmax(output_layer,axis=1) \n \n return labels",
"def sigmoid2predictions(output: torch.Tensor) -> torch.Tensor:\n return (torch.sign(output - 0.5) + 1) / 2",
"def predict(self):\n add = np.ones(len(self.X_test))\n X_add = np.c_[add, self.X_test]\n pred = np.dot(X_add, self.w_result.T)\n\n pred[pred > 0] = 1\n pred[pred < 0] = 0\n return pred",
"def add_prediction_op(self, outputs):\n dropout_rate = self.dropout_placeholder\n U = tf.get_variable(\"OutputWeights\", shape = (self.config.hidden_size, self.config.n_classes), initializer = tf.contrib.layers.xavier_initializer())\n b_2 = tf.get_variable(\"OutputBias\", shape = (self.config.n_classes), initializer = tf.zeros_initializer())\n\n outputs = tf.nn.dropout(outputs, dropout_rate) \n\n outputs = tf.reshape(outputs, [-1, self.config.hidden_size]) \n preds = tf.add(tf.matmul(outputs, U), b_2)\n preds = tf.reshape(preds, [self.config.batch_size, -1, self.config.n_classes])\n #preds = tf.Print(preds, [preds], summarize = self.config.n_classes)\n return preds",
"def predict(X, est_dict):\n \n def sign(arr):\n for i,a in enumerate(arr):\n if a >= 0:\n arr[i] = 1\n else:\n arr[i] = 0\n return arr\n \n def change_labels(arr, l1, l2):\n for i,a in enumerate(arr):\n if a == l1:\n arr[i] = l2\n return arr \n \n total = 0 \n for (tree, alpha) in est_dict.values():\n total += alpha * change_labels(tree.predict(X), 0, -1)\n \n return sign(total)",
"def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):\n\n if self.sparse_y_true:\n # Shape: (..., num_classes, ...)\n y_true = tf.one_hot(\n tf.cast(y_true, dtype=tf.int32),\n self.num_classes,\n axis=self.axis,\n on_value=True,\n off_value=False,\n )\n if self.sparse_y_pred:\n # Shape: (..., num_classes, ...)\n y_pred = tf.one_hot(\n tf.cast(y_pred, dtype=tf.int32),\n self.num_classes,\n axis=self.axis,\n on_value=True,\n off_value=False,\n )\n\n one_hot_axis = self.axis if self.axis >= 0 else (\n len(y_true.get_shape().as_list()) + self.axis)\n # Reduce sum the leading dimensions.\n # Shape: (num_classes, ...)\n current_intersection = tf.math.count_nonzero(\n y_pred & y_true, axis=np.arange(one_hot_axis), dtype=tf.float32\n )\n # Shape: (num_classes, ...)\n current_union = tf.math.count_nonzero(\n y_pred | y_true, axis=np.arange(one_hot_axis), dtype=tf.float32\n )\n\n self.intersection_per_class.assign_add(\n tf.cast(current_intersection, self.intersection_per_class.dtype))\n self.union_per_class.assign_add(\n tf.cast(current_union, self.union_per_class.dtype))",
"def transform_preds_to_boolean(img_size, preds):\n out = np.zeros(img_size, dtype=bool)\n\n adder = np.array([0., 1., 0., 1.])\n\n for i in preds:\n bbox = (i[0:4] + adder).astype(int)\n out[bbox[1]:bbox[3], bbox[0]:bbox[2]] = True\n\n return out",
"def rotate_predictions(predictions: npt.NDArray[np.number | np.bool8], quads: int) -> npt.NDArray[np.number | np.bool8]:\n return np.roll(predictions, -quads, axis=1)",
"def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()",
"def predict(self, new_table):\r\n \r\n if self.is_leaf:\r\n if len(self.path) == 1:\r\n self.cur.execute(\"SELECT ROUND(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + self.path[0] + \";\")\r\n else:\r\n self.cur.execute(\"SELECT ROUND(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n y = self.cur.fetchone()[0]\r\n self.cur.execute(\"UPDATE \" + new_table + \" SET preds = \" + str(y) + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n \r\n else:\r\n self.lhs.predict(new_table)\r\n self.rhs.predict(new_table)"
] |
[
"0.602781",
"0.5878617",
"0.576729",
"0.576729",
"0.56675243",
"0.55929726",
"0.5590841",
"0.5583576",
"0.5567605",
"0.55633736",
"0.5551773",
"0.5531288",
"0.5500199",
"0.5471059",
"0.5425876",
"0.54105395",
"0.53748655",
"0.53647625",
"0.5345529",
"0.534533",
"0.5344353",
"0.53232664",
"0.5301306",
"0.52914757",
"0.5287957",
"0.528486",
"0.5271117",
"0.527077",
"0.52576435",
"0.5246911"
] |
0.61948144
|
0
|
writes a pixel of varying size depending on the size of the overall image
|
def write_pixel(color, img_size, location, image, scale_factor):
x_location = scale(location.item(0), scale_factor)
y_location = scale(location.item(1), scale_factor)
img_cont = int(img_size/100)
if img_cont == 0:
image.putpixel((x_location, y_location), color)
else:
write_to_range(x_location-img_cont, x_location+img_cont, y_location-img_cont, y_location+img_cont, color, image, img_size)
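
A hedged usage sketch: the scale helper is not part of this snippet, so the linear version below is an assumption, and a small img_size keeps the call on the single-pixel putpixel path (the write_to_range branch relies on a helper that appears as a separate snippet further down).

import numpy as np
from PIL import Image


def scale(coordinate, scale_factor):
    # Assumed behaviour of the missing helper: linear scaling into pixel space.
    return int(coordinate * scale_factor)


img_size = 64  # int(64 / 100) == 0, so only putpixel() is exercised
img = Image.new("RGB", (img_size, img_size))
location = np.array([32, 16])  # .item(0) / .item(1) return plain ints
write_pixel((255, 0, 0), img_size, location, img, scale_factor=1.0)
img.save("pixel_demo.png")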
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def putPixel(new_image, message_to_encode):\n w = new_image.size[0]\n (x, y) = (0, 0)\n for pix in modifyPixel(new_image.getdata(), message_to_encode):\n new_image.putpixel((x, y), pix)\n if (x == w - 1):\n x = 0\n y += 1\n else:\n x += 1",
"def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")",
"def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)",
"def write(self, image):\n raise NotImplementedError()",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)",
"def upscale_main_side(output_img):\n #Fixing pixel which y % 2 != 1\n outputx, outputy = output_img.size\n for oy in range(0, outputy-1, 2):\n for ox in range(1, outputx, 2):\n pixel1 = output_img.getpixel((ox-1, oy))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if ox == outputx-1 :\n output_img.putpixel((ox, oy), (p1, p2, p3))\n else:\n pixel2 = output_img.getpixel((ox+1, oy))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Fixing pixel which y % 2 == 1\n for oy in range(1, outputy-1, 2):\n for ox in range(0, outputx):\n pixel1 = output_img.getpixel((ox, oy-1))\n p1 = pixel1[0]\n p2 = pixel1[1]\n p3 = pixel1[2]\n if oy == outputx:\n output_img.putpixel((ox, oy), (p1, p2, p3))\n break\n else:\n pixel2 = output_img.getpixel((ox, oy+1))\n P1 = pixel2[0]\n P2 = pixel2[1]\n P3 = pixel2[2]\n output_img.putpixel((ox, oy), (int((p1+P1)/2), int((p2+P2)/2), int((p3+P3)/2)))\n #print(f'pixel:{ox, oy} output:{output_img.getpixel((ox,oy))}')\n #Save image \n result_img = output_path+'/output.'+working_img.format.lower()\n output_img.save(result_img)\n print('Upscale finished..!')\n output_img.show()",
"def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. Changing the and to or just makes the grid go from dotted grid to lines.",
"def pad_image(np_img, new_img_file):\n\n h, w, c = np_img.shape\n side_len = max(h, w)\n # Create our square \"palette\" or area upon which the image data is placed\n # Make it kinda grey (e.g. a palette of all > 1)\n new_np_img = np.ones(side_len * side_len * c).reshape(side_len, \n side_len, c) * 100\n\n if h > w:\n for i in range(c):\n # Multiply by 255 because plt read in as vals 0-1, not 0-255\n old_patch = np_img[:, :, i] * 255\n pad = (side_len - w)//2\n new_np_img[:, pad:(pad + w), i] = old_patch\n plt.imsave(new_img_file, new_np_img)\n elif w > h:\n for i in range(c):\n old_patch = np_img[:, :, i] * 255\n pad = (side_len - h)//2\n new_np_img[pad:(pad + h), :, i] = old_patch\n plt.imsave(new_img_file, new_np_img)\n else:\n # Image already square - lucky!\n plt.imsave(new_img_file, np_img)",
"def set_pixel_size(img, pixel_size):\n\tnz = img.get_zsize()\n\timg.set_attr(\"apix_x\", round(pixel_size, 3))\n\timg.set_attr(\"apix_y\", round(pixel_size, 3))\n\timg.set_attr(\"apix_z\", round(pixel_size, 3))\n\tcc = img.get_attr_default(\"ctf\", None)\n\tif(cc):\n\t\tcc.apix = pixel_size\n\t\timg.set_attr(\"ctf\", cc)",
"def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0",
"def write_jpeg(filename,band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t stepsz=1.,clobber=False,verbose=0,tscale=1000.,retries=20):\n\tscipy.misc.imsave(filename,countmap(band,skypos,tranges,skyrange,\n\t\t\t\t\t width=width,height=height,verbose=verbose,tscale=tscale,\n\t\t\t\t\t retries=retries))\n\treturn",
"def write_to_range(start_x, end_x, start_y, end_y, color, image, img_size):\n\n for curr_x in range(start_x, end_x, 1):\n for curr_y in range(start_y, end_y, 1):\n if curr_x > 0 and curr_y > 0:\n if curr_x < img_size and curr_y < img_size:\n image.putpixel((curr_x, curr_y), color)",
"def write(self, filename):\n f = open(filename, 'bw')\n\n # file header (14)\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(14 + 40))\n\n # image header (40)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.pixels[x][y])\n f.close()",
"def write_png(buffer, width, height, fileobj, dpi=None): # real signature unknown; restored from __doc__\n pass",
"def _write_image(self):\r\n # Create an output raster with the correct number of rows and columns.\r\n gtiff_driver = gdal.GetDriverByName('GTiff')\r\n out_ds = gtiff_driver.Create(os.path.join(self.out_folder, self.out_file_name), self.column, self.row, 1)\r\n out_ds.SetProjection(self.in_ds.GetProjection())\r\n\r\n # Convert the offsets to real-world coordinates for the georeferencing info.\r\n # We can't use the coordinates above because they don't correspond to the pixel edges.\r\n subset_ulx, subset_uly = gdal.ApplyGeoTransform(self.in_gt, self.off_ulx, self.off_uly)\r\n out_gt = list(self.in_gt)\r\n out_gt[0] = subset_ulx\r\n out_gt[3] = subset_uly\r\n out_ds.SetGeoTransform(out_gt)\r\n\r\n data = self.read_image()\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(data)\r\n\r\n del out_ds",
"def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height",
"def write_greyscale(filename,pixels):\n height=len(pixels)\n width=len(pixels[0])\n with open(filename,'wb') as f:\n # BMP Header\n f.write(b'BM')\n size_bookmark=f.tell()\n f.write(b'\\x00\\x00\\x00\\x00')\n f.write(b'\\x00\\x00')\n f.write(b'\\x00\\x00')\n pixel_offset_bookmark=f.tell()\n f.write(b'\\x00\\x00\\x00\\x00')\n # PIXEL HEADER 40 BYTES\n f.write(b'\\x28\\x00\\x00\\x00')\n f.write(_int_to_bytes(width))\n f.write(_int_to_bytes(height))\n f.write(b'\\x01\\x00') #01 number of image planes, 08 bits per pixel\n f.write(b'\\x08\\x00') #01 number of image planes, 08 bits per pixel\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n f.write(b'\\x00\\x00\\x00\\x00') # Internal bmp\n \n # Color palatte - linear greyscale\n for c in range(256):\n f.write(bytes((c,c,c,0))) #Blue,green,red,zero\n \n pixel_data_bookmark = f.tell()\n for row in reversed(pixels):\n row_data=bytes(row)\n f.write(row_data)\n padding=b'\\x00' * ( 4 - (len(row)%4))\n f.write(padding)\n \n #EOF\n eof_bookmark = f.tell()\n\n # Fill size placeholder\n f.seek(size_bookmark)\n f.write(_int_to_bytes(eof_bookmark))\n f.seek(pixel_offset_bookmark)\n f.write(_int_to_bytes(pixel_data_bookmark))",
"def test_write_rgb(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 3)\n img_save_val = (255 * img).astype(np.uint8)\n writer_obj = PILWriter(output_dtype=np.uint8)\n writer_obj.set_data_array(img, channel_dim=-1)\n writer_obj.write(image_name)\n out = np.asarray(Image.open(image_name))\n out = np.moveaxis(out, 0, 1)\n np.testing.assert_allclose(out, img_save_val)",
"def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)",
"def imwrite(image, path):\n return scipy.misc.imsave(path, to_range(image, 0, 255, np.uint8))",
"def pixel(self, x: int, y: int, colour: int, /) -> None:",
"def pixel(self, x, y, color):\n self._set_window(x, y, x, y)\n self._write(None, _encode_pixel(color))",
"def set_pixel_size(self, pixel_size):\n raise NotImplementedError",
"def pixel( self, x, y, c = '#ffffff' ):\n self.raster.put( c, ( x, y ) )",
"def segmentation_write(filename,segmentation):\n\n segmentation_ = segmentation.astype('int32')\n seg_r = np.floor(segmentation_ / (256**2)).astype('uint8')\n seg_g = np.floor((segmentation_ % (256**2)) / 256).astype('uint8')\n seg_b = np.floor(segmentation_ % 256).astype('uint8')\n\n out = np.zeros((segmentation.shape[0],segmentation.shape[1],3),dtype='uint8')\n out[:,:,0] = seg_r\n out[:,:,1] = seg_g\n out[:,:,2] = seg_b\n\n Image.fromarray(out,'RGB').save(filename,'PNG')",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'",
"def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image",
"def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)",
"def tiled_writing(red, nir, output):\n \n #open datasets\n src_red = rio.open(red)\n src_nir = rio.open(nir)\n \n #define raster properies and update datatype\n meta = src_red.meta.copy()\n meta.update({'dtype':'float32'}) # meta is a dictionary\n outfile = output\n #open outfile in writing mode with the properties of defined raster band\n with rio.open(outfile, 'w', **meta) as dst:\n #iterate over blocks of the bands, calculate ndvi for each block \n # and put the blocks back together\n for window in calc_tiles(src_red, tile_size_x, tile_size_y):\n red_block = src_red.read(window=window, masked=True)\n nir_block = src_nir.read(window=window, masked=True)\n #cast ndarrays to Float32 type\n red = red_block.astype('f4')\n nir = nir_block.astype('f4')\n #allow division by zero\n np.seterr(divide='ignore', invalid='ignore')\n #calculate ndvi and write raster\n ndvi = (nir - red) / (nir + red)\n dst.write(ndvi, window=window)\n\n #close dataset\n src_red.close()\n src_nir.close()\n return outfile",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)"
] |
[
"0.6373138",
"0.6343465",
"0.6338091",
"0.616885",
"0.61488277",
"0.6131269",
"0.6082062",
"0.60536015",
"0.6052037",
"0.60322607",
"0.6015646",
"0.6009837",
"0.60093296",
"0.6004077",
"0.59644717",
"0.5935781",
"0.5928668",
"0.58588254",
"0.58426064",
"0.58031386",
"0.577759",
"0.5738793",
"0.5737097",
"0.57273924",
"0.570582",
"0.5695264",
"0.5694762",
"0.56937677",
"0.56678265",
"0.566102"
] |
0.7475846
|
0
|
loops through the given range coloring the pixels
|
def write_to_range(start_x, end_x, start_y, end_y, color, image, img_size):
for curr_x in range(start_x, end_x, 1):
for curr_y in range(start_y, end_y, 1):
if curr_x > 0 and curr_y > 0:
if curr_x < img_size and curr_y < img_size:
image.putpixel((curr_x, curr_y), color)
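
A minimal usage sketch, assuming the image argument is a Pillow Image as the putpixel call suggests (the original caller is not shown):

from PIL import Image

img_size = 100
img = Image.new("RGB", (img_size, img_size))
# Colour a 10x10 block near the centre; coordinates at or outside the
# (0, img_size) bounds are silently skipped by the guards above.
write_to_range(45, 55, 45, 55, (0, 255, 0), img, img_size)
img.save("range_demo.png")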
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)",
"def color(self, data):\n\n red = np.interp(data, self.range, self.r)\n blue = np.interp(data, self.range, self.b)\n green = np.interp(data, self.range, self.g)\n # Style plot to return a grey color when value is 'nan'\n red[np.isnan(red)] = 240\n blue[np.isnan(blue)] = 240\n green[np.isnan(green)] = 240\n colors = np.dstack([red.astype(np.uint8),\n green.astype(np.uint8),\n blue.astype(np.uint8),\n np.full_like(data, 255, dtype=np.uint8)])\n #return colors.view(dtype=np.uint32).reshape(data.shape)\n c=[]\n for i in range(len(data)):\n c.append([red[i],green[i],blue[i]])\n return c",
"def calculate_pixels(self, lower, upper):\n\n lower_range = np.array(lower)\n upper_range = np.array(upper)\n\n mask = cv2.inRange(self.hsv, lower_range, upper_range)\n return (mask == 255).sum()",
"def color_group(max_range):\n\n color = []\n\n for _ in range(0, max_range):\n col = []\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n col.append(random.random() % 1)\n color.append(col)\n\n dist_table = []\n\n for idx in range(0, max_range):\n dist_table.append([color_distance(color[idx], x) for x in color[:]])\n\n for _ in range(0, 50):\n for idx_start in range(0, max_range):\n global_point_distance = sum(dist_table[idx_start])\n tmp_dist_table = dist_table[idx_start][:]\n tmp_table = color[:]\n for idx_end in range(0, max_range):\n tmp_table[idx_end] = mutate_color(color[idx_end])\n tmp_dist_table[idx_end] = color_distance(\n color[idx_start],\n color[idx_end])\n if sum(tmp_dist_table) > global_point_distance:\n dist_table[idx_start] = tmp_dist_table[:]\n color = tmp_table[:]\n\n #for index in range(0, len(color)):\n # color[index] = hls_to_rgb(\n # color[index][0],\n # color[index][1],\n # color[index][2])\n\n return color",
"def graphColorRange(min, minColor, max, maxColor, numSlices=96, id=[0]):\n l = (graphColorSlice(\"-INF\", min, minColor, id=id) +\n graphColorSlice(max, \"INF\", maxColor, id=id))\n prevAlpha = 0\n while prevAlpha < 1:\n alpha = prevAlpha + 1.0 / numSlices\n l += graphColorSlice(prevAlpha*(max-min)+min,\n alpha*(max-min)+min,\n minColor.hsvBlend(maxColor, (alpha + prevAlpha) * 0.5),\n id=id)\n prevAlpha = alpha\n return l",
"def extract_single_color_range(image,hsv,lower,upper):\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(image,image, mask= mask)\n return res",
"def create_range_map(points_xyz: NDArrayFloat) -> NDArrayByte:\n range = points_xyz[..., 2]\n range = np.round(range).astype(int)\n color = plt.get_cmap(\"turbo\")(np.arange(0, range.max() + 1))\n color = color[range]\n range_cmap: NDArrayByte = (color * 255.0).astype(np.uint8)\n return range_cmap",
"def one_color(image,color=[0,0,255]):\r\n output = image.copy()\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n distance = calc_distance(color,image[line][column])\r\n if distance <=150:\r\n output[line][column]=[255,255,255]\r\n else:\r\n output[line][column]=[0,0,0]\r\n return output",
"def set_color_range(mic, N, indx, mat, quat, rod):\r\n first = True\r\n #print(indx)\r\n for i in range(N):\r\n if i in indx:\r\n mat[i,:,:] = RotRep.EulerZXZ2Mat(mic.snp[i,6:9]/180.0*np.pi)\r\n quat[i,:] = RotRep.quaternion_from_matrix(mat[i,:,:])\r\n rod[i,:] = RotRep.rod_from_quaternion(quat[i,:])\r\n if first:\r\n maxr = rod[i,0]\r\n minr = rod[i,0]\r\n maxg = rod[i,1]\r\n ming = rod[i,1]\r\n maxb = rod[i,2]\r\n minb = rod[i,2]\r\n maxri = i\r\n minri = i\r\n maxgi = i\r\n mingi = i\r\n maxbi = i\r\n minbi = i\r\n first = False\r\n else:\r\n if rod[i,0] > maxr:\r\n maxr = rod[i,0]\r\n maxri = i\r\n elif rod[i,0] < minr:\r\n minr = rod[i,0]\r\n minri = i\r\n if rod[i,1] > maxg:\r\n maxg = rod[i,1]\r\n maxgi = i\r\n elif rod[i,1] < ming:\r\n ming = rod[i,1]\r\n mingi = i\r\n if rod[i,2] > maxb:\r\n maxb = rod[i,2]\r\n maxbi = i\r\n elif rod[i,2] < minb:\r\n minb = rod[i,2]\r\n minbi = i\r\n else:\r\n rod[i,:]=[0.0,0.0,0.0]\r\n #print(\"Current rod values: \",rod)\r\n maxrgb = [maxr,maxg,maxb]\r\n minrgb = [minr,ming,minb]\r\n maxangs = [rod[maxri,0],rod[maxgi,1],rod[maxbi,2]]\r\n minangs = [rod[minri,0],rod[mingi,1],rod[minbi,2]]\r\n colors = rod\r\n for j in range(N):\r\n for k in range(0,3):\r\n colors[j,k] = (rod[j,k]-minrgb[k])/(maxrgb[k]-minrgb[k])\r\n return colors, maxangs, minangs",
"def rgb_range(col1, col2, n=255, cmap=None, pow=1):\n colr = [rgb_blend(col1, col2, (float(i)/float(n-1))**pow)\n for i in range(n)]\n if cmap is not None:\n import matplotlib.colors as col\n import matplotlib.cm as cm\n iscmap = col.ListedColormap(colr, name=cmap, N=n)\n cm.register_cmap(name=cmap, cmap=iscmap)\n return colr",
"def colors(im=None):\n if im is None:\n path = \"C:\\\\Users\\\\2053_HSUF\\\\PycharmProjects\\\\dialtones\\\\pics\\\\76aa16c9_playstore.png\"\n im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n for shade in range(13):\n colorized = cv2.applyColorMap(im, shade)\n cv2.imshow(\"yo\", colorized)\n cv2.waitKey(0)\n cv2.destroyAllWindows()",
"def split_colors(self, color_count, color_from, color_to):\n colors = []\n for c in range(3):#RGB\n step = np.abs(color_from[c] - color_to[c])/color_count\n if step:\n if color_from[c]>color_to[c]:\n color = np.arange(color_from[c],color_to[c],-step)\n else:\n color = np.arange(color_from[c],color_to[c],step)\n else:\n color = [color_from[c] for i in np.arange(color_count)]\n\n\n colors.append(color)\n colors = [(a,b,c) for a,b,c in zip(colors[0],colors[1],colors[2])]\n return colors",
"def sane(im,rangeTuple,y,start,end):\r\n #im = Image.open(pageImage)\r\n print(im.n_frames)\r\n lst = []\r\n for pg in range(rangeTuple[0],rangeTuple[1]+1):\r\n acc = True \r\n im.seek(pg)\r\n pix = im.load()\r\n for i in range(start,end):\r\n if(pix[i,y][0]!=0 or pix[i,y][1]!=255):\r\n acc = False \r\n break\r\n if(acc):\r\n lst.append(pg)\r\n return lst",
"def pixel(self, x: int, y: int, colour: int, /) -> None:",
"def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)",
"def pixelIter():\n\t\t\tx,y,i = 0,0,0\n\t\t\tfor i,c in enumerate(space):\n\t\t\t\tx = i % w\n\t\t\t\ty = i / w\n\t\t\t\tisSolid = (c=='#')\n\t\t\t\tyield x,y,i,isSolid",
"def display_cmap_color_range(cmap_style='rainbow'):\n cmap = plt.get_cmap(cmap_style)\n for c in range(256):\n plt.scatter([c], [0], s=500, c=cmap(c), lw=0)\n plt.show()",
"def fill_rgb(self, r, g, b, start=0, end=0):\n if start < 0:\n start = 0\n if end == 0 or end > self.last_index:\n end = self.last_index\n for led in range(start, end + 1): # since 0-index include end in range\n self.__set_internal(led, r, g, b)",
"def averageColorInRegion(self,x1,y1,x2,y2,skip_factor):\n \n\n rgb = [0, 0, 0, 0]\n temp = [0, 0, 0, 0]\n pixels = abs(((x2-x1) / skip_factor) * ((y2-y1) / skip_factor))\n\n #switching endpoints so iteration is positive\n if (x1 > x2):\n temp = x2\n x2 = x1\n x1 = temp\n\n if (y1 > y2):\n temp = y2\n y2 = y1\n y1 = temp\n\n for i in range(x1, x2, skip_factor):\n for j in range(y1, y2, skip_factor):\n temp = self.pixel(i, j)\n \n #rgb[0] += temp[0] * temp[3]/255 #Sum plus alpha correction\n #rgb[1] += temp[1] * temp[3]/255\n #rgb[2] += temp[2] * temp[3]/255\n #rgb[3] += temp[3]\n\n rgb[0] += temp[0] \n rgb[1] += temp[1]\n rgb[2] += temp[2] \n\n for i in range(4):\n rgb[i] = int(rgb[i] / pixels * brightness)\n #rgb[i] = int( (rgb[i] / pixels * brightness) * alpha)\n if (rgb[i] > 255):\n #cutting off at 255 - need to find the problem later\n rgb[i] = 255\n\n #if (rgb[i] < 20):\n # rgb[i] = 0\n \n\n return rgb",
"def _color_sample(img: np.ndarray, p: float = 0.05) -> np.ndarray:\n # combine the X and Y dimension into one, only keep the channels dimension\n ravelled = img.reshape(-1, 3)\n # for 5%, take every 20th value, for 10% every 10th, etc...\n every_nth = int(1 / p)\n return ravelled[::every_nth, :]",
"def color(step: int=10) -> Tuple[int, int, int]:\n # Randomly seed the r g b values\n r, g, b = (random_uniform(0, 255), random_uniform(0, 255),\n random_uniform(0, 255))\n\n # Randomly determine if each r g and b value is increasing or not\n r_inc = True\n g_inc = True\n b_inc = True\n r_step = random_uniform(step)\n g_step = random_uniform(step)\n b_step = random_uniform(step)\n\n # Yield the initial r, g, b values\n yield r, g, b\n\n # Loop and yeild forever\n while True:\n # If r is increasing\n if r_inc:\n # Increment r by the step\n r += r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to decreasing\n r_inc = r < 255 - r_step\n # If r is decreasing\n else:\n # Decrement r by the step\n r -= r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to increasing\n r_inc = r < r_step\n\n # See above\n if g_inc:\n g += g_step\n g_inc = g < 255 - g_step\n else:\n g -= g_step\n g_inc = g < g_step\n\n # See above\n if b_inc:\n b += b_step\n b_inc = b < 255 - b_step\n else:\n b -= b_step\n b_inc = b < b_step\n\n # Yield the red, green, and blue values\n yield r, g, b",
"def grayscale(img):\n for pixel in img:\n x, y, col = pixel\n r, g, b = col\n \n r = (r + g + b)/3\n r = g = b\n \n new_color = create_color(r, g, b)\n set_color(img, x, y, new_color)",
"def pixelIter():\n\t\t\tx,y,i = 0,0,0\n\t\t\tfor i,c in enumerate(space):\n\t\t\t\tx = i % w\n\t\t\t\ty = i / w\n\t\t\t\tisSolid = (c=='#')\n\t\t\t\tyield x,y,i,isSolid\n\t\t\t\tprintSpace(x,y) # print state for debugging",
"def extract_single_color_range(self, image,hsv,lower,upper):\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(image,image, mask= mask)\n\n res = cv2.medianBlur(res, 5)\n return res",
"def get_color(in_val, min_val=0, max_val=100):\n width = max_val - min_val\n unit = width / len(continuum)\n return continuum[min(int(in_val / unit), 19)]",
"def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false",
"def get_color_in_region(self, start, end):\n # Input format: (start_x, start_y), (end_x, end_y)\n start_x, start_y = start\n end_x, end_y = end\n\n # x and y are flipped\n crop_img = self.img[start_x:(end_x + 1), start_y:(end_y + 1)]\n channels = cv2.mean(crop_img)\n\n # Return BGR\n return channels[0], channels[1], channels[2]",
"def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color",
"def intensityPSF_Blues(N=1000):\n col_seq = [( 0/255., 20/255., 80/255.), ( 8/255., 48/255., 107/255.),\n ( 8/255., 81/255., 156/255.), ( 33/255., 113/255., 181/255.),\n ( 66/255., 146/255., 198/255.), (107/255., 174/255., 214/255.),\n (158/255., 202/255., 225/255.), (198/255., 219/255., 239/255.),\n (222/255., 235/255., 247/255.), (247/255., 251/255., 255/255.)]\n\n cdict = {'red': ((0.00, col_seq[0][0], col_seq[0][0]),\n (0.02, col_seq[1][0], col_seq[1][0]),\n (0.06, col_seq[2][0], col_seq[2][0]),\n (0.10, col_seq[3][0], col_seq[3][0]),\n (0.20, col_seq[4][0], col_seq[4][0]),\n (0.30, col_seq[5][0], col_seq[5][0]),\n (0.50, col_seq[6][0], col_seq[6][0]),\n (0.75, col_seq[7][0], col_seq[7][0]),\n (0.90, col_seq[8][0], col_seq[8][0]),\n (1.00, col_seq[9][0], col_seq[9][0])),\n 'green': ((0.00, col_seq[0][1], col_seq[0][1]),\n (0.02, col_seq[1][1], col_seq[1][1]),\n (0.06, col_seq[2][1], col_seq[2][1]),\n (0.10, col_seq[3][1], col_seq[3][1]),\n (0.20, col_seq[4][1], col_seq[4][1]),\n (0.30, col_seq[5][1], col_seq[5][1]),\n (0.50, col_seq[6][1], col_seq[6][1]),\n (0.75, col_seq[7][1], col_seq[7][1]),\n (0.90, col_seq[8][1], col_seq[8][1]),\n (1.00, col_seq[9][1], col_seq[9][1])),\n 'blue': ((0.00, col_seq[0][2], col_seq[0][2]),\n (0.02, col_seq[1][2], col_seq[1][2]),\n (0.06, col_seq[2][2], col_seq[2][2]),\n (0.10, col_seq[3][2], col_seq[3][2]),\n (0.20, col_seq[4][2], col_seq[4][2]),\n (0.30, col_seq[5][2], col_seq[5][2]),\n (0.50, col_seq[6][2], col_seq[6][2]),\n (0.75, col_seq[7][2], col_seq[7][2]),\n (0.90, col_seq[8][2], col_seq[8][2]),\n (1.00, col_seq[9][2], col_seq[9][2]))}\n\n psfblues = _mplb.colors.LinearSegmentedColormap('psfblues', cdict, N)\n return psfblues",
"def colortopalette(color, palette):\n for a,b in palette:\n if color >= a and color < b:\n return b"
] |
[
"0.6508234",
"0.64523953",
"0.640964",
"0.63950825",
"0.63663226",
"0.62469196",
"0.6158317",
"0.6139538",
"0.6075288",
"0.6061774",
"0.60614336",
"0.6053607",
"0.60478157",
"0.60340863",
"0.59674525",
"0.5940123",
"0.5936041",
"0.5923042",
"0.591694",
"0.5907577",
"0.5906161",
"0.588923",
"0.58477914",
"0.5834614",
"0.5821324",
"0.5807498",
"0.5802034",
"0.57914746",
"0.57818294",
"0.5780481"
] |
0.724895
|
0
|
Merge two sorted iterators, return a list
|
from typing import Iterator, List


def merge_two_iterators(itr1: Iterator, itr2: Iterator) -> List:
    result = []

    def rest_elems(itr):
        # Drain whatever remains of the unexhausted iterator.
        for el in itr:
            result.append(el)

    # If either iterator is empty, the answer is just the other one.
    try:
        elem1 = next(itr1)
    except StopIteration:
        rest_elems(itr2)
        return result
    try:
        elem2 = next(itr2)
    except StopIteration:
        result.append(elem1)
        rest_elems(itr1)
        return result

    while True:
        if elem1 < elem2:
            result.append(elem1)
            try:
                elem1 = next(itr1)
            except StopIteration:
                result.append(elem2)
                rest_elems(itr2)
                return result
        elif elem1 == elem2:
            result.append(elem1)
            result.append(elem2)
            try:
                elem1 = next(itr1)
            except StopIteration:
                rest_elems(itr2)
                return result
            try:
                elem2 = next(itr2)
            except StopIteration:
                # elem1 was already fetched above, so keep it before draining.
                result.append(elem1)
                rest_elems(itr1)
                return result
        else:
            result.append(elem2)
            try:
                elem2 = next(itr2)
            except StopIteration:
                result.append(elem1)
                rest_elems(itr1)
                return result
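
A quick usage check, assuming both inputs are already sorted as the description requires:

print(merge_two_iterators(iter([1, 3, 5]), iter([2, 3, 8, 9])))  # [1, 2, 3, 3, 5, 8, 9]
print(merge_two_iterators(iter([]), iter([4, 7])))               # [4, 7]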
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def merge(a, b):\n result = []\n\n # Append smallest values to result until either list is exhausted\n i = j = 0\n while i < len(a) and j < len(b):\n if compare(a[i], b[j]) < 0:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n j += 1\n\n # Append all remaining values from the unexhausted list\n if i < len(a):\n result.extend(a[i:])\n else:\n result.extend(b[j:])\n\n return result",
"def merge(self,iterable1, iterable2):\r\n stopItreation=-20000000\r\n iterator1 = iter(iterable1)\r\n iterator2 = iter(iterable2)\r\n object_from_iter1 = next(iterator1, stopItreation)\r\n object_from_iter2 = next(iterator2, stopItreation)\r\n obj=stopItreation\r\n while object_from_iter1 != stopItreation and object_from_iter2 != stopItreation:\r\n if object_from_iter1 >= object_from_iter2:\r\n temp = object_from_iter2\r\n object_from_iter2 = next (iterator2, stopItreation)\r\n else:\r\n temp = object_from_iter1\r\n object_from_iter1 = next (iterator1, stopItreation)\r\n yield temp\r\n\r\n if object_from_iter1 == stopItreation and object_from_iter2 != stopItreation:\r\n lastitertor = iterator2\r\n obj=object_from_iter2\r\n elif object_from_iter1 !=stopItreation and object_from_iter2 == stopItreation:\r\n lastitertor = iterator1\r\n obj=object_from_iter1\r\n\r\n while obj != stopItreation:\r\n yield obj\r\n obj = next(lastitertor,stopItreation)",
"def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res",
"def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result",
"def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results",
"def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result",
"def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list",
"def merge(l1, l2):\n i = j = 0\n output = []\n\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n output.append(l1[i])\n i += 1\n else:\n output.append(l2[j])\n j += 1\n\n output.extend(l1[i:] + l2[j:])\n\n return output",
"def _merge_two_sorted_list(sorted_list_head, sorted_list_tail):\n sorted_list_result = list()\n head_index = 0\n tail_index = 0\n len_head = len(sorted_list_head)\n len_tail = len(sorted_list_tail)\n\n while head_index < len_head and tail_index < len_tail:\n print(sorted_list_head, ' : ', sorted_list_tail)\n if sorted_list_head[head_index] < sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n head_index += 1\n elif sorted_list_head[head_index] > sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_tail[tail_index])\n tail_index += 1\n elif sorted_list_head[head_index] == sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n sorted_list_result.append(sorted_list_tail[tail_index])\n head_index += 1\n tail_index += 1\n\n if head_index < len_head:\n sorted_list_result.extend(sorted_list_head[head_index:])\n elif tail_index < len_tail:\n sorted_list_result.extend(sorted_list_tail[tail_index:])\n\n return sorted_list_result",
"def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output",
"def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]",
"def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2",
"def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list",
"def merge(left, right):\n ret = []\n li = ri = 0\n while li < len(left) and ri < len(right):\n if left[li] <= right[ri]:\n ret.append(left[li])\n li += 1\n else:\n ret.append(right[ri])\n ri += 1\n if li == len(left):\n ret.extend(right[ri:])\n else:\n ret.extend(left[li:])\n return ret",
"def merge_two(l, r):\n new = []\n i1, i2 = 0, 0\n while i1 != len(l) and i2 != len(r):\n if l[i1] < r[i2]:\n new.append(l[i1])\n i1 += 1\n else:\n new.append(r[i2])\n i2 += 1\n\n new.extend(l[i1:])\n new.extend(r[i2:])\n return new",
"def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result",
"def merge(a, b):\r\n # your code here\r\n \r\n m = []\r\n i, j = 0, 0\r\n \r\n while i < len(a) and j < len(b):\r\n if a[i] < b[j]:\r\n m.append(a[i])\r\n i += 1\r\n else:\r\n m.append(b[j])\r\n j += 1\r\n \r\n m += a[i:] + b[j:]\r\n \r\n return m",
"def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list",
"def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer",
"def merge(a,b):\n c = []\n while len(a) != 0 and len(b) != 0:\n if a[0] < b[0]:\n c.append(a[0])\n a.remove(a[0])\n else:\n c.append(b[0])\n b.remove(b[0])\n if len(a) == 0:\n c += b\n else:\n c += a\n return c",
"def merge(left, right):\n aList = []\n lt = 0\n rt = 0\n\n #Repeatedly move the smallest of left and right to the new list\n while lt < len(left) and rt < len(right):\n if left[lt] < right[rt]:\n aList.append(left[lt])\n lt += 1\n else:\n aList.append(right[rt])\n rt += 1\n\n #There will only be elements left in one of the original two lists.\n\n #Append the remains of left (lt..end) on to the new list.\n while lt < len(left):\n aList.append(left[lt])\n lt += 1\n \n #Append the remains of right (rt..end) on to the new list.\n while rt < len(right):\n aList.append(right[rt])\n rt += 1\n\n return aList",
"def merge(l1, l2):\n # Edge cases, where nothing is to be done.\n if l1 is None and l2 is None: return l1\n if l1 is None: return l2\n if l2 is None: return l1\n\n # Vars to hold,\n # head -> a dummy head to keep a reference to the start of the merged\n # list.\n # _iter -> to move through the merged list.\n head = ListNode(float('-inf'))\n _iter = head\n\n # As long as both the lists are not exhausted,\n while l1 and l2:\n\n # Make the next of _iter as the smaller node.\n if l1.val <= l2.val:\n _iter.next = l1\n l1 = l1.next\n else:\n _iter.next = l2\n l2 = l2.next\n # Move _iter forward.\n _iter = _iter.next\n\n # If either of the lists remain, add them to the end,\n # Note: at-least one of the lists would be exhausted by now,\n # and the remaining one is sorted in itself, which is why this works.\n if not l1: _iter.next = l2\n if not l2: _iter.next = l1\n\n # Return a reference to the start of the merged list.\n return head.next",
"def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list",
"def merge_sort(a, b):\n l = []\n while a and b:\n if a[0] < b[0]:\n l.append(a.pop(0))\n else:\n l.append(b.pop(0))\n return l + a + b",
"def linear_merge(sorted1, sorted2):\n first_pointer = 0\n second_pointer = 0\n sorted_result = []\n\n while second_pointer < len(sorted2) and first_pointer < len(sorted1):\n if sorted1[first_pointer] < sorted2[second_pointer]:\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n else:\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while second_pointer < len(sorted2):\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while first_pointer < len(sorted1):\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n\n\n return sorted_result",
"def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list",
"def merge(a: List[int], b: List[int]) -> List[int]:\n merged = []\n i = j = 0\n alen = len(a)\n blen = len(b)\n while i < alen or j < blen:\n aval = a[i] if i < alen else float(\"inf\")\n bval = b[j] if j < blen else float(\"inf\")\n if aval <= bval:\n merged.append(a[i])\n i += 1\n else:\n merged.append(b[j])\n j += 1\n return merged",
"def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output",
"def concatenate(self, other):\n return as_stream_iterator(_flatten_stream_from_reversed_list([other, self]))",
"def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li"
] |
[
"0.71105134",
"0.7017018",
"0.6994239",
"0.69318044",
"0.692406",
"0.69093746",
"0.68480617",
"0.6843464",
"0.6825293",
"0.6777762",
"0.6776245",
"0.67496943",
"0.67142767",
"0.6700895",
"0.669669",
"0.6681653",
"0.6602123",
"0.6597254",
"0.6567569",
"0.6565212",
"0.6551639",
"0.65501535",
"0.6543988",
"0.65326947",
"0.6522941",
"0.6518899",
"0.6494421",
"0.6457084",
"0.64403594",
"0.6436872"
] |
0.8080191
|
0
|
Load a legacy XML project and raise a UserException if there was an error.
|
def load_xml(project, file_path):
    tree = ElementTree()

    try:
        root = tree.parse(file_path)
    except Exception as e:
        raise UserException(
                "there was an error reading the project file", str(e))

    _assert(root.tag == 'Project',
            "Unexpected root tag '{0}', 'Project' expected.".format(root.tag))

    # Check the project version number.
    version = root.get('version')
    _assert(version is not None, "Missing 'version' attribute.")

    try:
        version = int(version)
    except (TypeError, ValueError):
        version = None

    _assert(version is not None, "Invalid 'version'.")

    if version < _MIN_VERSION:
        raise UserException("the project's format is no longer supported")

    if version > _LAST_VERSION:
        raise UserException(
                "the project's format is version {0} but only version {1} is "
                "supported".format(version, _LAST_VERSION))

    # The application specific configuration.
    application = root.find('Application')
    _assert(application is not None, "Missing 'Application' tag.")

    project.application_entry_point = application.get('entrypoint', '')
    project.application_is_console = _get_bool(application, 'isconsole',
            'Application')
    project.application_is_bundle = _get_bool(application, 'isbundle',
            'Application')
    project.application_name = application.get('name', '')
    project.application_script = application.get('script', '')
    project.sys_path = application.get('syspath', '')

    # Any qmake configuration.  This was added in version 5.
    qmake_configuration = application.find('QMakeConfiguration')
    if qmake_configuration is not None:
        project.qmake_configuration = qmake_configuration.text

    # Any application package.
    app_package = application.find('Package')
    if app_package is not None:
        project.application_package = _load_package(app_package)
    else:
        project.application_package = QrcPackage()

    # Any standard library modules.
    project.parts = []

    for stdlib_module_element in root.iterfind('StdlibModule'):
        name = stdlib_module_element.get('name')
        _assert(name is not None, "Missing 'StdlibModule.name' attribute.")
        project.parts.append('Python:' + name)

    # Any PyQt modules.
    for pyqt_m in root.iterfind('PyQtModule'):
        name = pyqt_m.get('name', '')
        _assert(name != '', "Missing or empty 'PyQtModule.name' attribute.")

        component_map = {
            'Qsci': 'QScintilla',
            'Qt3DAnimation': 'PyQt3D',
            'Qt3DCore': 'PyQt3D',
            'Qt3DExtras': 'PyQt3D',
            'Qt3DInput': 'PyQt3D',
            'Qt3DLogic': 'PyQt3D',
            'Qt3DRender': 'PyQt3D',
            'QtChart': 'PyQtChart',
            'QtDataVisualization': 'PyQtDataVisualization',
            'QtPurchasing': 'PyQtPurchasing',
            'QtWebEngine': 'PyQtWebEngine',
            'QtWebEngineCore': 'PyQtWebEngineCore',
            'QtWebEngineWidgets': 'PyQtWebEngineWidgets',
            'sip': 'SIP',
        }

        project.parts.append(
                '{}:PyQt5.{}'.format(component_map.get(name, 'PyQt'), name))
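
# A minimal, self-contained sketch (not part of the original record) showing
# the legacy XML shape that load_xml() walks; the version number and the
# attribute values below are illustrative assumptions only.
from xml.etree.ElementTree import fromstring

_sample = fromstring(
        '<Project version="7">'
        '<Application name="demo" script="demo.py"/>'
        '</Project>')
print(_sample.get('version'), _sample.find('Application').get('name'))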
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_recover_from_bad_xml(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n jp2 = Jp2k(self._bad_xml_file)\n\n self.assertEqual(jp2.box[3].box_id, 'xml ')\n self.assertEqual(jp2.box[3].offset, 77)\n self.assertEqual(jp2.box[3].length, 64)\n self.assertEqual(ET.tostring(jp2.box[3].xml.getroot()),\n b'<test>this is a test</test>')",
"def test_negative_file_and_xml(self):\n xml_object = ET.parse(self.xmlfile)\n with self.assertRaises((IOError, OSError)):\n glymur.jp2box.XMLBox(filename=self.xmlfile, xml=xml_object)",
"def load(self, path):\n if not os.path.isfile(path):\n message = 'Error: The xml was not found, path to search: ' + path\n raise IOError(message)\n\n self._xml = Et.parse(path)\n self._path_to_xml = os.path.abspath(path)",
"def test_cannot_load_invalid_file(self):\n with self.assertRaises(urllib.error.URLError):\n _ = xmlschema.XMLSchema('does-not-exist.xsd')",
"def _check_deprecated_openerp_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath(\"/openerp\") \\\n if not isinstance(doc, string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True",
"def test_load_no_project():\n\n assert_raises(Exception, inventory.load, PROJECT_NAME)",
"def test_xml_safety_flag(self):\r\n\r\n self._setstaff_login()\r\n response = self._add_edx4edx()\r\n self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)\r\n\r\n def_ms = modulestore()\r\n course = def_ms.courses.get('{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR)), None)\r\n self.assertIsNone(course)",
"def test_script_rejects_bad_xml(self):\n maker = Compilers(test_utils.MockMachines(\"mymachine\", \"SomeOS\"), version=2.0)\n with self.assertRaises(ParseError):\n test_utils.get_macros(maker, \"This is not valid XML.\", \"Makefile\")",
"def test_import_infra(self):\n project = Project.create()\n # Read an engine and check\n infra = import_infra(\"A320.xml\", \"engine\")\n self.assertEqual(len(infra.engines), 1)\n engine = infra.engines[0]\n self.assertEqual(engine.name, \"Machine 0\")\n self.assertEqual(engine.hauteur, 0.0)\n # Local frame:\n self.assertEqual(engine.position.x, 0.0)\n self.assertEqual(engine.position.y, 0.0)\n self.assertEqual(engine.position.z, 0.0)\n\n # Read a building and check\n infra = import_infra(\"Building.xml\", \"building\")\n self.assertEqual(len(infra.buildings), 1)\n building = infra.buildings[0]\n self.assertEqual(building.name, \"MyBuilding\")\n self.assertEqual(building.hauteur, 0.0)\n # Local frame:\n self.assertEqual(building.position.x, 0.0)\n self.assertEqual(building.position.y, 0.0)\n self.assertEqual(building.position.z, 0.0)\n\n # Check a no radiant building is refused:\n try:\n infra = import_infra(\"Building_no_radiant.xml\", \"building\")\n except:\n print(\"Ok, non radiant building is refused as expected.\")\n else:\n print(\"Non radiant building should be refused.\")\n sys.exit(-1)",
"def __init__(self):\n try:\n # this succeeds with python 2\n import StringIO\n class_StringIO = StringIO.StringIO\n except Exception:\n # this succeeds with python 3\n import io\n class_StringIO = io.StringIO\n\n # create some XML with an error\n sio = class_StringIO( \"<foo> <bar> </foo>\\n\" )\n try:\n ET.parse( sio )\n except Exception:\n self.ET_exc_class = sys.exc_info()[0]\n else:\n # something is wrong; the drawback to this fallback is that you\n # cannot distinguish an XML error from other errors\n self.ET_exc_class = Exception",
"def assertValidXML(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_xml(data)",
"def from_xml(cls, xml_data, system, id_generator):\r\n raise NotImplementedError('Modules must implement from_xml to be parsable from xml')",
"def test_valid_xml(self):\r\n self.build_problem()\r\n self.assertTrue(True)",
"def loadLegacyMetadata(xml_datafile, py_datafile, env_var=None):\n\t# Attempt to load XML datafile\n\tfrom shared import xml_metadata\n\tdata_object = xml_metadata.Metadata()\n\tdata_object_loaded = data_object.load(xml_datafile)\n\n\tif data_object_loaded:\n\t\tif env_var is not None:\n\t\t\tos.environ[env_var] = xml_datafile # update env var\n\t\treturn data_object_loaded, data_object\n\n\t# If XML file doesn't exist, create defaults, and attempt to convert\n\t# data from Python data files.\n\telse:\n\t\t# Try to convert from icData.py to XML or JSON (legacy assets)\n\t\tdata_path = os.path.dirname(xml_datafile)\n\t\tif convertAssetData(data_path, data_object):\n\t\t\tdata_object_loaded = data_object.reload()\n\t\t\treturn data_object_loaded, data_object\n\n\t\telse:\n\t\t\treturn False, None",
"def test_quest_load_version_fail(testing_quest_page):\n testing_quest_page.save()\n\n # fetch the data\n doc = testing_quest_page.doc_ref.get()\n data = testing_quest_page.storage_model.parse_obj(doc.to_dict())\n\n # mess with the version\n data.version = str(VersionInfo.parse(data.version).bump_major())\n testing_quest_page.doc_ref.set(data.dict())\n\n # try to load with the bad version\n with pytest.raises(QuestLoadError):\n testing_quest_page.load()\n\n # cleanup\n testing_quest_page.delete()",
"def test_002_import_exception(self):\n m = schematics_flexible.BaseFlexible(\n {'code': '07',\n 'properties': {\"a\": \"this is test\"}},\n store_handler=get_mock())\n try:\n m.validate()\n except schematicsValidationError:\n pass\n else:\n self.assertTrue(False,\n 'Model must raise exception when import raise')",
"def test_panda_to_xml_from_person_xml(self):\n xml_person = self.test_person.to_xml()\n with self.assertRaises(ValueError):\n self.test_panda.from_xml(xml_person)",
"def load(self):\n log.error('Cannot load: %s', self.file_name)",
"def _check_deprecated_data_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True",
"def load_xml(finder, module):\n module.IgnoreName(\"_xmlplus\")",
"def loader(self, href, parse, encoding=None):\n \n if href[0] != '/':\n href = os.path.join(self.directory, href)\n if not os.path.isfile(href) or not os.access(href, os.R_OK):\n raise RuntimeException(\"Xml: File %s Doesn't Exist or Not Readable (xi)\" % href)\n \n file = open(href)\n if parse == \"xml\":\n data = ElementTree.parse(file).getroot()\n else:\n data = file.read()\n if encoding:\n data = data.decode(encoding)\n file.close()\n return data",
"def __init__(self, xml_file):\n try:\n # Load xml file\n fd = resource_stream(__name__, 'map/{}'.format(xml_file))\n self.spec = parse(fd)\n self.remove_whitespace_nodes(self.spec, True)\n fd.close()\n\n except OSError:\n raise EDIFileNotFoundError(\n '{} is missing in the package'.format(xml_file)\n )",
"def _xmlRead(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n logger.debug(\"{0:s}xmlFile: {1:s} parse Xml ...\".format(logStr,self.xmlFile)) \r\n tree = ET.parse(self.xmlFile) # ElementTree \r\n root = tree.getroot() # Element\r\n\r\n self.dataFrames=Xm._xmlRoot2Dfs(root)\r\n\r\n #fixes and conversions\r\n self._convertAndFix()\r\n\r\n #Views\r\n self._vXXXX()\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def test_findbarxml(self):\n self.run_script_for_compat('alert(e.test(\"findbar.xml\"));')\n self.assert_silent()\n self.assert_compat_error()",
"def test_wrong_xml_namespace(self):\n with pytest.raises(IndexError):\n mocked_request = self.get_signed_grade_mock_request(namespace_lti_v1p1=False)\n self.xmodule.parse_grade_xml_body(mocked_request.body)",
"def test_XMLParser_file_not_found():\n with pytest.raises(OSError):\n parser = XMLParser(\"no_such_file\")",
"def _assert(ok, detail):\n\n if not ok:\n raise UserException(\"the project file is invalid\", detail)",
"def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)",
"def __init__(self, xml_text):\n logger.verbose(\"Load Version.xml\")\n self.parse(xml_text)",
"def read_xml(self):\n pass"
] |
[
"0.5530275",
"0.5451773",
"0.54403687",
"0.52372694",
"0.5144637",
"0.5054844",
"0.5004451",
"0.49637967",
"0.4963071",
"0.4958828",
"0.49542674",
"0.4944267",
"0.49304378",
"0.49113086",
"0.48677188",
"0.48588017",
"0.4855717",
"0.4820855",
"0.4813323",
"0.47716698",
"0.4762332",
"0.4745949",
"0.47443786",
"0.47439107",
"0.47354472",
"0.4712978",
"0.46656078",
"0.4664356",
"0.46561992",
"0.46553478"
] |
0.59002835
|
0
|
Validate an assertion and raise a UserException if it failed.
|
def _assert(ok, detail):
    if not ok:
        raise UserException("the project file is invalid", detail)
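
# Usage as in load_xml() above: guard a parsed value and surface a
# UserException with a helpful detail message when the check fails.
#     version = root.get('version')
#     _assert(version is not None, "Missing 'version' attribute.")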
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _Assert(self, t):\n self.RaiseError(t, \"Assert not supported\")",
"def assertion_failed(self, func, exception):",
"def test_validate_user(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(\n self.category, self.user_alice, self.role_contributor\n )",
"def assertion_errored(self, func, exception):",
"def validate_Assert(result, _dummy_condition):\n return result",
"def assert_(self, expr, msg=None):\n \n if not expr: raise Exception, msg",
"def test_form_with_invalid_user(self):\n self._update_site_configuration()\n error, _ = self._assert_form(self.site, self.other_valid_email)\n assert error == \"User with this email doesn't exist in system.\"",
"def test_verify_invalid_user(self, rp_logger):\n\n test_name = sys._getframe().f_code.co_name\n\n rp_logger.info(\"###### TEST EXECUTION STARTED :: \" +\n test_name + \" ######\")\n\n first_name = data_reader.get_data(test_name, 'FirstName')\n last_name = data_reader.get_data(test_name, 'LastName')\n email = data_reader.get_data(test_name, 'Email')\n\n with allure.step(\"Verify whether user exists\"):\n result = base_api.verify_valid_user(\n email, first_name, last_name)\n exe_status.mark_final(test_step=test_name, result=result)",
"def test_trestle_validation_error() -> None:\n msg = 'Custom validation error'\n try:\n raise TrestleValidationError(msg)\n except TrestleValidationError as err:\n assert str(err) == msg\n assert err.msg == msg",
"def test_validate_failure(self, args, value):\n sch = scheme.Scheme(*args)\n with pytest.raises(errors.SchemeValidationError):\n sch.validate(value)",
"def assert_schema_error(*args):\n assert_exception(SchemaError, *args)",
"def ras(assertion, message=\"\"):\n if not assertion:\n raise Exception(message)",
"def validation_assert(predicate):\r\n if not predicate:\r\n raise ProfileDistribution.ValidationError()",
"def test_error_expression(self):\n error_expression = expression.InvalidExpression(\"an error message\")\n # All three of \"penalty_expression\", \"constraint_expression\" and\n # \"extra_constraints\" should raise.\n with self.assertRaises(RuntimeError):\n _ = error_expression.penalty_expression\n with self.assertRaises(RuntimeError):\n _ = error_expression.constraint_expression\n with self.assertRaises(RuntimeError):\n _ = error_expression.extra_constraints",
"def test_validate_when_user_found(self, view, mget_user):\n assert view.validate() is None",
"def test_user_errors(self):\r\n user = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n\r\n # User.name should not be nullable\r\n user.name = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.fullname should not be nullable\r\n user.name = \"johndoe\"\r\n user.fullname = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n # User.email_addr should not be nullable\r\n user.name = \"johndoe\"\r\n user.fullname = \"John Doe\"\r\n user.email_addr = None\r\n db.session.add(user)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()",
"def test_unverified_user_cannot_log_in(self):\n login_data = {\"email\": \"[email protected]\", \"password\": \"hardpassword\"}\n user = User.objects.create_user(username=\"bob\", email=\"[email protected]\",\n password=\"hardpassword\")\n user.save()\n serializer = LoginSerializer(data=login_data)\n\n with self.assertRaises(ValidationError) as e:\n serializer.validate(login_data)\n\n self.assertEqual(e.exception.detail[0],\n \"Please verify your account to proceed.\")",
"def test_invalid(if_statement_validator):\n test = {\n 'condition': 'is',\n 'target': 'bob',\n }\n validate_result = if_statement_validator(test)\n\n assert not is_successful(validate_result)\n assert isinstance(validate_result.failure(), ValueError)\n assert isinstance(validate_result.failure().args[0][0], ValidationError)\n assert \"'then' is a required property\" in str(validate_result.failure())",
"def test_validation_can_fail():\n\n @type_checked\n def _run_test(something:int): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(\"abc\")\n\n assert \"abc is of type str, expecting int.\" in error.value.args",
"def test_validation(self):\n self.validationFails()",
"def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')",
"def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None",
"def test_invalid_user_input(self):\n m = Message(\n text=None, user_id=None\n )\n db.session.add(m)\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()",
"def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()",
"def assertValid(self, data, requirement, msg=None):\n # Setup traceback-hiding for pytest integration.\n __tracebackhide__ = lambda excinfo: excinfo.errisinstance(ValidationError)\n\n try:\n validate(data, requirement, msg=msg)\n except ValidationError as err:\n def should_truncate(line_count, char_count):\n return self.maxDiff and (char_count > self.maxDiff)\n err._should_truncate = should_truncate\n\n err._truncation_notice = \\\n 'Diff is too long. Set self.maxDiff to None to see it.'\n\n raise err",
"def assertFailedBeforeEmailing(self, email_user):\r\n self.assertRolledBack()\r\n self.assertFalse(email_user.called)",
"def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)",
"def require(assertion):\n if not assertion:\n raise PermissionDenied",
"def require(assertion):\n if not assertion:\n raise PermissionDenied",
"def _assert_form(self, site, email, is_valid_form=False):\n error = ''\n instance = None\n form = AllowedAuthUserForm({'site': site.id, 'email': email})\n if is_valid_form:\n assert form.is_valid()\n instance = form.save()\n else:\n assert not form.is_valid()\n error = form.errors['email'][0]\n return error, instance"
] |
[
"0.638513",
"0.6113857",
"0.60952735",
"0.60506934",
"0.59488094",
"0.5854287",
"0.5829402",
"0.5820156",
"0.57493156",
"0.57345164",
"0.57106066",
"0.5699617",
"0.56764406",
"0.5654198",
"0.56525695",
"0.5652089",
"0.5625533",
"0.55753726",
"0.55424374",
"0.5541871",
"0.5524159",
"0.55230397",
"0.55047846",
"0.5496444",
"0.54945457",
"0.5486716",
"0.5481013",
"0.5479935",
"0.5479935",
"0.54548526"
] |
0.66372156
|
0
|
Return a populated QrcPackage instance.
|
def _load_package(package_element):
    package = QrcPackage()

    package.name = package_element.get('name')
    _assert(package.name is not None, "Missing 'Package.name' attribute.")

    package.contents = _load_mfs_contents(package_element)

    package.exclusions = []
    for exclude_element in package_element.iterfind('Exclude'):
        name = exclude_element.get('name', '')
        _assert(name != '',
                "Missing or empty 'Package.Exclude.name' attribute.")
        package.exclusions.append(name)

    return package
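
# A minimal sketch (not from the original record) of the 'Package' element
# shape this parser walks; _load_package() itself also needs QrcPackage,
# _load_mfs_contents and _assert from the surrounding module, so only the
# XML shape is illustrated here.  The names used are assumptions.
from xml.etree.ElementTree import fromstring

_pkg_el = fromstring(
        '<Package name="resources">'
        '<Exclude name="*.pyc"/>'
        '</Package>')
print(_pkg_el.get('name'),
        [e.get('name') for e in _pkg_el.iterfind('Exclude')])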
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _package(self):\n if self._package_obj is None:\n self._package_obj = self._import_package()\n\n return self._package_obj",
"def get_package(cls, name: str):\n pkg = Package(name=name)\n return pkg",
"def real_obj(self):\n return RemotePackage(xml_data=etree.tostring(self))",
"def __init__(self, package_name: str) -> None:\n self._package_name = package_name\n self._package_obj: Optional[ModuleType] = None",
"def rpConstruct(cls):\r\n return cls(None, None, None)",
"def package(self, root):\n return Package(self._archive_impl, root)",
"def create_package(self):\n return self.create(\"RequestedPackageLineItem\")",
"def from_metadata(cls, meta):\r\n return Package(\r\n name=meta[\"Name\"],\r\n version=meta[\"Version\"],\r\n homepage=meta[\"Home-page\"],\r\n license_name=meta[\"License\"],\r\n source_url=meta[\"Project-URL\"],\r\n package_url=purl_extractor.get_purl_from_meta_dict(meta),\r\n )",
"def get_package(self, __package_id):\n raise NotImplementedError",
"def __init__(__self__, *,\n packaging_group_id: pulumi.Input[str],\n cmaf_package: Optional[pulumi.Input['PackagingConfigurationCmafPackageArgs']] = None,\n dash_package: Optional[pulumi.Input['PackagingConfigurationDashPackageArgs']] = None,\n hls_package: Optional[pulumi.Input['PackagingConfigurationHlsPackageArgs']] = None,\n mss_package: Optional[pulumi.Input['PackagingConfigurationMssPackageArgs']] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['PackagingConfigurationTagArgs']]]] = None):\n pulumi.set(__self__, \"packaging_group_id\", packaging_group_id)\n if cmaf_package is not None:\n pulumi.set(__self__, \"cmaf_package\", cmaf_package)\n if dash_package is not None:\n pulumi.set(__self__, \"dash_package\", dash_package)\n if hls_package is not None:\n pulumi.set(__self__, \"hls_package\", hls_package)\n if mss_package is not None:\n pulumi.set(__self__, \"mss_package\", mss_package)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)",
"def get_package_from_dict(pkg_dict):\n pkg_obj = Package(pkg_dict['name'])\n pkg_obj.version = (\"\" if pkg_dict['versionInfo'] == 'NOASSERTION'\n else pkg_dict['versionInfo'])\n pkg_obj.proj_url = (\"\" if pkg_dict['downloadLocation'] == 'NONE'\n else pkg_dict['downloadLocation'])\n pkg_obj.copyright = (\"\" if pkg_dict['copyrightText'] == 'NONE'\n else pkg_dict['copyrightText'])\n return pkg_obj",
"def package(self) -> pulumi.Output['outputs.PackageResponse']:\n return pulumi.get(self, \"package\")",
"def package(self) -> Optional[pulumi.Input['PackageArgs']]:\n return pulumi.get(self, \"package\")",
"def generate(self):\n\n if self.check_data():\n pkg_dict = self.create_package_dict()\n pkg_dir = pkg_dict['dir'] + pkg_dict['name']\n if os.path.exists(pkg_dir):\n but = QMessageBox().question(self, 'Message', \"Такой проект уже существует! Хотите перезаписать?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if but == QMessageBox.Yes:\n shutil.rmtree(pkg_dir)\n self.pkg = package.RosPackage(pkg_dict)\n self.msg2Statusbar.emit('Успешная генерация')\n else:\n self.pkg = package.RosPackage(pkg_dict)\n self.msg2Statusbar.emit('Успешная генерация')",
"def package_module(name):\n return Package(name, f\"Package (filler) module {name}\")",
"def installQPackage(self, name, domain, version, reconfigure=True):\n installPackageCommand = \"\"\"p = q.qp.find(name=\"%(name)s\", domain=\"%(domain)s\", version=\"%(version)s\")\nif not p:\n raise valueError(\"Package %(domain)s, %(name)s, %(version)s not found\")\nelif len(p) <> 1:\n raise valueError(\"Too many packages found with search criteria %(domain)s, %(name)s, %(version)s\")\nelif not p[0].isInstalled():\n p[0].install()\nelse:\n print \"Package %(domain)s, %(name)s, %(version)s is already installed\"\n\"\"\"%{'name':name,'version':version,'domain':domain,'reconfigure':reconfigure}\n self.executeQshell(installPackageCommand)\n if reconfigure:\n self.executeQshell(\"q.qp._runPendingReconfigeFiles()\")",
"def parse(self) -> Sequence[PackageItem]:\n raise NotImplementedError(\"parse() method is not implemented\")",
"def init_package(self):\n package_name = self.args.name\n if package_name is None:\n msg = 'an package name must provide for enzi init'\n logging.error(msg)\n raise SystemExit(BASE_ESTRING + msg)\n\n initializer = ProjectInitialor(package_name)\n initializer.init()",
"def __getitem__(self, package):\n\n\t\treturn self._packages.setdefault(\n\t\t\tpackage,\n\t\t\tPackage(package)\n\t\t)",
"def clone(self):\n return _libsbml.QualPkgNamespaces_clone(self)",
"def __init__(self, package_id, address, city, state,\n zip_code, deadline, weight, time_at_status, notes):\n self.id = package_id\n self.address = address\n self.city = city\n self.state = state\n self.zip_code = zip_code\n self.deadline = deadline\n self.weight = weight\n self.status = {PackageStatus(1): time_at_status, PackageStatus(2): \"N/A\", PackageStatus(3): \"N/A\"}\n self.notes = notes",
"def fill(self):\n # type: () -> ImmutableJar\n return ImmutableJar(capacity=self.capacity, units=self.capacity)",
"def build(self):\n root = ET.Element(\"package\", **self.attr)\n self.build_meta(root)\n self.build_manifest(root)\n self.build_spine(root)\n return root",
"def __init__(self):\n\n # connect to quilt\n self.pkg = quilt3.Package.browse(\n \"aics/segmenter_model_zoo\", registry=\"s3://allencell\"\n )\n self.meta = self.pkg[\"metadata.csv\"]()",
"def __init__(\n self,\n package_name: str,\n version: str = \"0.0.1\",\n description: str = \"MicroPython stubs\",\n stubs: Optional[StubSources] = None,\n json_data: Optional[Dict[str, Any]] = None,\n ):\n if json_data is not None:\n self.from_dict(json_data)\n\n else:\n # store essentials\n # self.package_path = package_path\n self.package_name = package_name\n self.description = description\n self.mpy_version = clean_version(version, drop_v=True) # Initial version\n self.hash = None # intial hash\n \"\"\"Hash of all the files in the package\"\"\"\n self.stub_hash = None # intial hash\n \"\"\"hash of the the stub files\"\"\"\n self.create_update_pyproject_toml()\n\n self.stub_sources: StubSources = []\n # save the stub sources\n if stubs:\n self.stub_sources = stubs\n\n self._publish = True\n self.status: Status = Status(\n {\n \"result\": \"-\",\n \"name\": self.package_name,\n \"version\": self.pkg_version,\n \"error\": None,\n }\n )",
"def cmaf_package(self) -> pulumi.Output[Optional['outputs.PackagingConfigurationCmafPackage']]:\n return pulumi.get(self, \"cmaf_package\")",
"def prepare(self):\n return self",
"def __init__ ( self, package_info, err_queue, depres_channel_spawner=None ):\n self.package_info = package_info\n self.package_info.set_readonly()\n\n self.logger = LOGGER.getChild ( package_info ['name'] )\n\n # > 0 busy/working; 0 == done,success; < 0 done,fail\n self.status = 1\n # function reference that points to the next task\n self._resume = self._run_prepare\n self.paused = False\n\n self.depres_channel_spawner = depres_channel_spawner\n\n self.err_queue = err_queue\n\n #self.use_expand_flag_names = None",
"def initDefault(self):\n #--Package Only\n self.archive = ''\n self.modified = 0 #--Modified date\n self.size = 0 #--size of archive file\n self.crc = 0 #--crc of archive\n self.type = 0 #--Package type: 0: unset/invalid; 1: simple; 2: complex\n self.fileSizeCrcs = []\n self.subNames = []\n self.src_sizeCrcDate = {} #--For InstallerProject's\n #--Dirty Update\n self.dirty_sizeCrc = {}\n #--Mixed\n self.subActives = []\n #--User Only\n self.skipVoices = False\n self.hasExtraData = False\n self.comments = ''\n self.group = '' #--Default from abstract. Else set by user.\n self.order = -1 #--Set by user/interface.\n self.isActive = False\n self.espmNots = set() #--Lowercase esp/m file names that user has decided not to install.\n #--Volatiles (unpickled values)\n #--Volatiles: directory specific\n self.refreshed = False\n #--Volatile: set by refreshDataSizeCrc\n self.readMe = self.packageDoc = self.packagePic = None\n self.data_sizeCrc = {}\n self.skipExtFiles = set()\n self.skipDirFiles = set()\n self.espms = set()\n self.unSize = 0\n #--Volatile: set by refreshStatus\n self.status = 0\n self.underrides = set()\n self.missingFiles = set()\n self.mismatchedFiles = set()\n self.mismatchedEspms = set()"
] |
[
"0.61555564",
"0.6028244",
"0.57431936",
"0.57262385",
"0.5538875",
"0.55191153",
"0.5511188",
"0.54345876",
"0.5383191",
"0.52641064",
"0.5221279",
"0.5204265",
"0.5195892",
"0.51358944",
"0.5101493",
"0.5052417",
"0.4975617",
"0.4965095",
"0.49423385",
"0.49272996",
"0.49205995",
"0.48968223",
"0.48829854",
"0.48801547",
"0.4866861",
"0.48638204",
"0.48510775",
"0.4824574",
"0.48051682",
"0.47812513"
] |
0.64988154
|
0
|
Replace any qmake scopes in a value.
|
def _replace_scopes(value):
    value = value.replace('linux-*', 'linux')
    value = value.replace('macx', 'macos')
    value = value.replace('win32', 'win')

    return value
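
# Illustrative example (not from the original record): old qmake scope names
# are mapped onto their current equivalents.
print(_replace_scopes('win32|macx|linux-*'))  # -> 'win|macos|linux'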
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_scope(self, value):\n self._set_one_attribute(self.AttributeNames.SCOPE, value)\n return self",
"def ReplaceDepsVar(deps_file, variable_name, value):\n with open(deps_file, 'r') as file_handle:\n contents_old = file_handle.read()\n contents_new = re.sub(\n '\"%s\":.*,' % variable_name,\n '\"%s\": \"%s\",' % (variable_name, value),\n contents_old)\n with open(deps_file, 'w') as file_handle:\n file_handle.write(contents_new)",
"def replace(self, text):\n for key, val in self.env.items():\n text = text.replace(\"$%s\" % key, val)\n return text",
"def doEdit(var, value, target):\n currentValue = target.get(var, \"\")\n newValue = Simplifier.simplify(str(value).replace(f\"{{{var}}}\", str(currentValue)))\n target[var] = newValue",
"def _replacement(name):\n ret = os.getenv(name, values.get(name, \"\"))\n return ret",
"def substitute_macros(text):\n f_text = text\n for (pattern,replacement) in context.environment.items():\n replacement = replacement.replace(os.path.sep,'/')\n f_text = f_text.replace('$(%s)' % pattern.upper(), replacement)\n return f_text",
"def setGlobal(name, value):",
"def fusion_api_edit_scope(self, uri, body=None, api=None, headers=None, eTag=None):\n\n return self.scope.put(uri=uri, body=body, api=api, headers=headers, eTag=eTag)",
"def texify(value):\n for k, v in REPLACEMENTS.items():\n value = value.replace(k, v)\n return mark_safe(value)",
"def updateScopes(markup):\n markupNew = markup.copy()\n if markupNew.getVerbose():\n print u\"updating scopes\"\n modifiers = markupNew.getConTextModeNodes(\"modifier\")\n for modifier in modifiers:\n if(markupNew.getVerbose()):\n print(u\"old scope for %s is %s\"%(modifier.__str__(),modifier.getScope()))\n modifier.setScope()\n if(markupNew.getVerbose()):\n print(u\"new scope for %s is %s\"%(modifier.__str__(),modifier.getScope()))\n\n\n # Now limit scope based on the domains of the spans of the other\n # modifier\n for i in range(len(modifiers)-1):\n modifier = modifiers[i]\n for j in range(i+1,len(modifiers)):\n modifier2 = modifiers[j]\n if( TO.limitScope(modifier,modifier2) and\n modifier2.ti.citem.getRule().lower() == 'terminate'):\n markupNew.add_edge(modifier2,modifier)\n if( TO.limitScope(modifier2,modifier) and\n modifier.ti.citem.getRule().lower() == 'terminate'):\n markupNew.add_edge(modifier,modifier2)\n markupNew.graph[\"__SCOPEUPDATED\"] = True\n return markupNew",
"def _change_references(path, name, val):\n\n text = _open_file(path)\n for row in text.split('\\n'):\n if row.startswith(name + \"=\"):\n row = f'{name}={val}'\n yield row",
"def set_in_context(self,**params):\n for name,val in params.items():\n if name in self.context:\n self.warning(\"Replacing previous value of '%s' with '%s'\"%(name,val))\n self.context[name]=val\n self.unused_names.add(name)",
"def expand_vars(self, line, name_dict):\n if '%' not in line:\n return line\n for k, v in name_dict.iteritems():\n line = line.replace('%VAR_' + k + '%', escape(v))\n return line",
"def remove_from_environment_variable(parent, key, value):\n environ_list = maybe_environ(key).split(os.pathsep)\n environ_list.remove(value)\n\n os.environ[key] = os.pathsep.join(environ_list)\n\n # See http://stackoverflow.com/questions/370047/\n if parent:\n parent.remove_from_environment_variable(key, value)",
"def overrideGlobalDefine(self,name,value):\n if name in self.globalDefines:\n self.globalDefines[name] = value\n self.refreshGlobals()\n else:\n raise Exception(\"Trying to override inexistent define!\")",
"def do_set(self,args):\n self.bindings.update(parse_kvs(re.split(' +',args)))\n print_bindings(self.bindings)",
"def remove_from_environment_variable(self, key, value):\n value = BashParentEnvironment._format_environment_value(value)\n script = (\"export {k}=$(python -c \\\"\"\n \"print(\\\\\\\":\\\\\\\".join(\\\\\\\"${k}\\\\\\\".split(\\\\\\\":\\\\\\\")\"\n \"[:\\\\\\\"${k}\\\\\\\".split(\\\\\\\":\\\\\\\").index(\\\\\\\"{v}\\\\\\\")] + \"\n \"\\\\\\\"${k}\\\\\\\".split(\\\\\\\":\\\\\\\")[\\\\\\\"${k}\\\\\\\".split(\\\\\\\":\\\\\\\")\"\n \".index(\\\\\\\"{v}\\\\\\\") + 1:]))\\\")\")\n # There is something about the format() method on str which causes\n # pychecker to trip over when passing keyword arguments. Just\n # pass keyword arguments using the ** notation.\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = script.format(**script_keys)\n self._printer(script)",
"def resourcescope(self, **content):\n self.resource_scopes.update(content)\n self.resource_scopes.update(self.overwrite_resource_scopes)",
"def replace(*args, **kwds):\n ns = utils.ns_prepare(args + (kwds,), install=False)\n settings = utils.settings_from_ns(ns, update=True)\n return settings",
"def remove_from_environment_variable(self, key, value):\n script = (\"$env:{k} = python -c \\\"\"\n \"print(';'.join(r'$env:{k}'.split(';')\"\n \"[:r'$env:{k}'.split(r';').index(r'{v}')] + \"\n \"r'$env:{k}'.split(';')\"\n \"[r'$env:{k}'.split(';')\"\n \".index(r'{v}') + 1:]))\\\"\")\n script_keys = {\n \"k\": key,\n \"v\": value\n }\n script = script.format(**script_keys)\n self._printer(script)",
"def line_replacer(config,change_this_line,key):\n for arg in config['HyperParameter'][key]: \n pattern=r'{}[ ]*=.*,'.format(arg)\n replace_value=config['HyperParameter'][key][arg][counter]\n if type(replace_value) is str:\n replace_value=\"'\"+replace_value+\"'\"\n change_this_line=re.sub(pattern,\"{}= {},\".format(arg,replace_value),change_this_line)\n return change_this_line",
"def ShrinkToSnippet(cmd_parts, var_name, var_value):\n\n def shrink(value):\n parts = (x and SingleQuote(x) for x in value.split(var_value))\n with_substitutions = ('\"$%s\"' % var_name).join(parts)\n return with_substitutions or \"''\"\n\n return ' '.join(shrink(part) for part in cmd_parts)",
"def expand_values(self):\n for k, v in self.job.items():\n foundkey = re.search(self.key_pattern, v)\n # This is done iteratively so that it doesn't matter what order\n # lines appear in a bake parameter file\n while foundkey:\n v = v.replace(\n foundkey.group(0),\n self.job[foundkey.group(0)])\n foundkey = re.search(self.key_pattern, v)\n self.job[k] = v",
"def replace_filevalue(file_name, orgval, newval):\n for line in fileinput.input(file_name, inplace = 1):\n print line.replace(str(orgval), str(newval)),\n fileinput.close()",
"def change_scopes(request, ally_id):\n\n scopes = []\n for key in request.POST:\n if key in ESI_SCOPES:\n scopes.append(key)\n\n if len(scopes) > 0:\n data = \"{\\\"mandatory_esi_scopes\\\": [\\\"\" + \"\\\",\\\"\".join(scopes) + \"\\\"]}\"\n else:\n data = \"{\\\"mandatory_esi_scopes\\\": []}\"\n request_change = requests.put(GLOBAL_URL+f\"/{ally_id}\", headers=global_headers(request, {\"Content-type\":\"application/json\"}), data=data)\n if request_change.status_code != 200:\n return render_error(request_change)\n\n params = urlencode({\"changed_scopes\": \"true\"})\n return_url = reverse(\"alliance-sheet\", args=[ally_id]) + \"?\" + params\n return redirect(return_url)",
"def add_template_scope(node, names, values):\n scope = sortedmap()\n for name, value in zip(names, values):\n scope[name] = value\n node.scope = scope",
"def push_local_ns(self, name, value):\n self.interpreter.locals[name] = value",
"def _replace_jinja2_vars(lines: List[str], jinja2_vars: dict) -> List[str]:\n # these regex find jinja2 set statements without and with selectors\n jinja2_re = re.compile(r\"^(\\s*){%\\s*set\\s*(.*)=\\s*(.*)%}(.*)\")\n jinja2_re_selector = re.compile(r\"^(\\s*){%\\s*set\\s*(.*)=\\s*(.*)%}\\s*#\\s*\\[(.*)\\]\")\n\n all_jinja2_keys = set(list(jinja2_vars.keys()))\n used_jinja2_keys = set()\n\n # first replace everything we can\n # we track which kets have been used so we can add any unused keys ar the\n # end\n new_lines = []\n for line in lines:\n _re_sel = jinja2_re_selector.match(line)\n _re = jinja2_re.match(line)\n\n # we only replace simple constant set statements\n if (_re_sel or _re) and not _is_simple_jinja2_set(line):\n new_lines.append(line)\n continue\n\n if _re_sel:\n # if the line has a selector in it, then we need to pull\n # out the right key with the selector from jinja2_vars\n spc, var, val, sel = _re_sel.group(1, 2, 3, 4)\n key = var.strip() + CONDA_SELECTOR + sel\n if key in jinja2_vars:\n _new_line = (\n spc\n + \"{% set \"\n + var.strip()\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %} # [\"\n + sel\n + \"]\\n\"\n )\n used_jinja2_keys.add(key)\n else:\n _new_line = line\n elif _re:\n # no selector\n spc, var, val, end = _re.group(1, 2, 3, 4)\n if var.strip() in jinja2_vars:\n _new_line = (\n spc\n + \"{% set \"\n + var.strip()\n + \" = \"\n + json.dumps(jinja2_vars[var.strip()])\n + \" %}\"\n + end\n )\n used_jinja2_keys.add(var.strip())\n else:\n _new_line = line\n else:\n _new_line = line\n\n if _new_line is not None:\n if _new_line[-1] != \"\\n\":\n _new_line = _new_line + \"\\n\"\n\n new_lines.append(_new_line)\n\n # any unused keys, possibly with selectors, get added here\n if all_jinja2_keys != used_jinja2_keys:\n extra_lines = []\n extra_jinja2_keys = all_jinja2_keys - used_jinja2_keys\n for key in sorted(list(extra_jinja2_keys)):\n if CONDA_SELECTOR in key:\n _key, selector = key.split(CONDA_SELECTOR)\n extra_lines.append(\n \"{% set \"\n + _key\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %}\"\n + \" # [\"\n + selector\n + \"]\\n\",\n )\n else:\n extra_lines.append(\n \"{% set \"\n + key\n + \" = \"\n + json.dumps(jinja2_vars[key])\n + \" %}\"\n + \"\\n\",\n )\n\n new_lines = extra_lines + new_lines\n\n return new_lines",
"def replace_variables(self, text, context):\n text = text.replace('__VENV_DIR__', context.env_dir)\n text = text.replace('__VENV_NAME__', context.env_name)\n text = text.replace('__VENV_PROMPT__', context.prompt)\n text = text.replace('__VENV_BIN_NAME__', context.bin_name)\n text = text.replace('__VENV_PYTHON__', context.env_exe)\n return text",
"def replace_platform_variable(name, op):\n if not name in g_platform_variables:\n raise RuntimeError(\"trying to destroy nonexistent platform variable '%s'\" % (name))\n g_platform_variables[name] = { \"default\" : op }"
] |
[
"0.560765",
"0.55158067",
"0.53255934",
"0.526088",
"0.5163381",
"0.5125354",
"0.51055837",
"0.5105084",
"0.5008712",
"0.500784",
"0.4998744",
"0.49631014",
"0.49616838",
"0.49495396",
"0.49195024",
"0.49156302",
"0.4911678",
"0.4909545",
"0.49070176",
"0.48991",
"0.4882357",
"0.48733702",
"0.4848232",
"0.4839295",
"0.48365682",
"0.4820897",
"0.4811187",
"0.48090577",
"0.48034498",
"0.47938398"
] |
0.72859895
|
0
|
Shift the mean value of `x_mod` such that it equals the mean of `x_org`.
|
def shift_mean(x_mod, x_org):
    @_decorate_validation
    def validate_input():
        _numeric('x_mod', ('integer', 'floating', 'complex'), shape=(-1, -1))
        _numeric('x_org', ('integer', 'floating', 'complex'),
                 shape=x_mod.shape)

    validate_input()

    return x_mod + (x_org.mean() - x_mod.mean())
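
# Illustrative sketch (not from the original record): the operation is an
# additive mean correction, mirroring the return expression above; numpy is
# assumed here purely for the example.
import numpy as np

_x_org = np.arange(4.0).reshape(2, 2)      # mean 1.5
_x_mod = _x_org + 10.0                     # mean 11.5
_shifted = _x_mod + (_x_org.mean() - _x_mod.mean())
print(np.isclose(_shifted.mean(), _x_org.mean()))  # True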
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]",
"def normalize_scl(self,x):\n max_val = np.max(x['data'][0])\n last_val = x['data'][0][-1]\n return last_val/max_val",
"def x_mean(self):\n return self._get_mean_pole(\"x\")",
"def adjust(data):\n mu = mean(data)\n return mu, map(lambda x: (x-mu), data)",
"def adjust(x):\n return numpy.insert(x, 0, 1, axis=1)",
"def calculate_mean_mu(self, xi, xf, l):\n\n return (xf - xi) / l",
"def untruncatedMean(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMean not yet implemented for ' + self.type)",
"def normalize(self, X):\n return X - X.mean()",
"def bin_normalize_moving(self, x):\n return _bin_normalize(x, self.mmin, self.mdelta)",
"def reduced_normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x[mean] = 0.\n x /= x.sum()\n return x",
"def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):\n # For each seed, climb gradient until convergence or max_iter",
"def reset_mean(self,new_mean):\n self.mean = new_mean\n return",
"def reset_mean(self,new_mean):\n self.mean = new_mean\n return",
"def _calc_xmin(self, xmax, xmean):\n self.xmin = xmean-.01\n self.xmax = xmax\n for i in itertools.count():\n self.norm = self._integrate(self.xmax)-self._integrate(self.xmin)\n newmean = self.mean()\n delta = - newmean + xmean\n #print self.xmin, newmean, self.xmax, delta\n if abs(delta) < self.tol:\n break\n self.xmin += delta\n if i > 1e3:\n raise ValueError(\"Power law parameter calculation did not converge: \"\n \"power=%s, xmean=%s, xmax=%s\"%(self.power, xmean, xmax))\n return self.xmin",
"def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):",
"def moving_average(x, window_len = 11):\n\n one_over_size = 1./float(window_len)\n window = np.ones(window_len) * one_over_size\n result = np.convolve(x, window, 'valid')\n # Cut off the clutter at the boundaries\n #return result[ (window_len-1)/2 : -(window_len-1)/2]\n return result",
"def __ExpMovingAverage(self, values, window):\n weights = np.exp(np.linspace(-1., 0., window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode='full')[:len(values)]\n a[:window] = a[window]\n return a",
"def shift_world_x(self, shift_x):\n\n # Keep track of the shift amount\n self.world_shift_x += shift_x\n\n # Go through all the sprite lists and shift\n for platform in self.platform_list:\n platform.rect.x += shift_x\n\n for enemy in self.enemy_list:\n enemy.rect.x += shift_x\n # shift x for projectiles\n if enemy.total_snowballs > 0:\n snowballs = enemy.snowballGroup.sprites()\n for ball in snowballs:\n ball.rect.x += shift_x\n if enemy.numOfDarts > 0:\n darts = enemy.dartGroup.sprites()\n for dart in darts:\n dart.rect.x += shift_x\n\n for exit_door in self.exit_sprite:\n exit_door.rect.x += shift_x\n\n for bag in self.bagGroup:\n bag.rect.x += shift_x",
"def modReduce(self, x):\n\n assert 0 <= x < pow(self.mod, 2), 'out of range.'\n q = (x * self.u) >> (2 * self.M_bit)\n r = x - q * self.mod\n while r >= self.mod:\n r -= self.mod\n return r",
"def random_shift(x, fraction):\n min_x, max_x = np.min(x), np.max(x)\n m = np.random.uniform(-fraction, fraction, size=x.shape) + 1\n return np.clip(x * m, min_x, max_x)",
"def _updateSymbolDistributionMean(self): \n # Update the mean of the Gaussian \n for i in range(1,self.K+1):\n num = 0\n den = 0\n for n in range(1,self.N+1):\n for t in range(1,self.T+1):\n pos_prob = self.posterior_state_trellis[n][(t,i)]\n data_value = self.data[(n,t)]\n num = num + (pos_prob*data_value)\n den = den + pos_prob \n \n self.state_symbol_prob[i]['mean'] = num/den",
"def batch_normalization(x, phase_train, out_size):\n\n\twith tf.variable_scope('bn'):\n\t\tbeta = tf.Variable(tf.constant(0.0, shape=[out_size]), name='beta', trainable=True)\n\t\tgamma = tf.Variable(tf.constant(1.0, shape=[out_size]), name='gamma', trainable=True)\n\t\tbatch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([batch_mean, batch_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\tmean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\t\tnormed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\treturn normed",
"def expanding_mean(arr):\n total_len = arr.shape[0]\n return ((arr / total_len).cumsum() / np.arange(1, total_len + 1)) * total_len",
"def increase_x(self, state_amount):\n pos_amount = state_amount / self.space_subdivisions\n self.pos_x = (self.pos_x + pos_amount) % 1.0",
"def update(self,x): #update the estimate of rewards and number of esteps run\n\t\tself.N += 1\n\t\tself.estimate_mean = (1.0-1.0/self.N)*self.estimate_mean + (1.0/self.N)*x #recurence relation for averages",
"def batch_norm(x, phase_train, scope='bn', affine=True):\n\n with tf.variable_scope(scope):\n og_shape = x.get_shape().as_list()\n if len(og_shape) == 2:\n x = tf.reshape(x, [-1, 1, 1, og_shape[1]])\n shape = x.get_shape().as_list()\n beta = tf.Variable(tf.constant(0.0, shape=[shape[-1]]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[shape[-1]]),\n name='gamma', trainable=affine)\n\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.9)\n ema_apply_op = ema.apply([batch_mean, batch_var])\n ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)\n\n def mean_var_with_update():\n \"\"\"Summary\n Returns\n -------\n name : TYPE\n Description\n \"\"\"\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema_mean, ema_var))\n\n normed = tf.nn.batch_norm_with_global_normalization(\n x, mean, var, beta, gamma, 1e-3, affine)\n if len(og_shape) == 2:\n normed = tf.reshape(normed, [-1, og_shape[-1]])\n return normed",
"def batch_normalization(x, phase_train, out_size):\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed",
"def mean(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.mean, _crank16.mean, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)",
"def normalize_rank(self, x, axis=-1):\n\t\tx = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n\t\treturn x",
"def add_rolling_mean(self, rm):\n self.data['rolling_mean'] = rm"
] |
[
"0.5332351",
"0.5176737",
"0.51122636",
"0.5105994",
"0.51024467",
"0.50528365",
"0.5001982",
"0.49988565",
"0.496684",
"0.49610654",
"0.49609196",
"0.4941782",
"0.4941782",
"0.4908955",
"0.49046615",
"0.48930743",
"0.48907068",
"0.48776814",
"0.48756415",
"0.48735654",
"0.48541453",
"0.48476636",
"0.48447862",
"0.48425934",
"0.48238254",
"0.4820501",
"0.48182687",
"0.4805529",
"0.4797914",
"0.47842512"
] |
0.8315604
|
0
|
Stretch image such that pixel values are in the range [0, `max_val`].
|
def stretch_image(img, max_val):
    @_decorate_validation
    def validate_input():
        _numeric('img', 'floating', shape=(-1, -1))
        _numeric('max_val', ('integer', 'floating'), range_='(0;inf)')

    validate_input()
    min_ = img.min()
    max_ = img.max()
    if max_ > min_:
        val = max_val / (max_ - min_) * (img - min_)
    else:
        val = img - min_
    return val
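A minimal usage sketch for the rescale above, assuming `img` is a NumPy floating-point array; the `_decorate_validation` / `_numeric` helpers are taken to be validation utilities from the surrounding codebase and are not reproduced here, so the core mapping is restated inline:

import numpy as np

def _stretch(img, max_val):
    # Same linear mapping as stretch_image above, minus the input validation.
    min_, max_ = img.min(), img.max()
    if max_ > min_:
        return max_val / (max_ - min_) * (img - min_)
    return img - min_

img = np.array([[0.0, 0.5], [0.75, 1.0]])
out = _stretch(img, 255.0)
print(out.min(), out.max())  # -> 0.0 255.0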
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def scale(image, maxval=1024):\n image += maxval # minimum value is now 0\n image /= maxval*2\n\n return(image)",
"def scale_floor(value, old_max, new_max):\n\tassert value >= 0\n\tassert value <= old_max\n\treturn new_max * value / old_max",
"def stretch(imgLayer, numStdDev, minVal, maxVal, ignoreVal, \n globalMean, globalStdDev, outputNullVal=0):\n\n stretched = (minVal + \n ((imgLayer - globalMean + globalStdDev * numStdDev) * (maxVal - minVal))/(globalStdDev * 2 *numStdDev))\n \n stretched = stretched.clip(minVal, maxVal)\n stretched = numpy.where(imgLayer == ignoreVal, outputNullVal, stretched)\n return stretched",
"def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)",
"def saturate_scalar_minmax(value, max_value, min_value):\n mean = (max_value + min_value)/2.0\n half_range = (max_value - min_value)/2.0\n return saturate_vector_dg(value-mean, half_range) + mean",
"def normalise_slice(slice, max_val=255):\n slice = slice - slice.min()\n slice = slice / np.float(slice.max())\n slice = slice * max_val\n return(slice)",
"def rescale255(img, maximum=None):\n if maximum is None:\n maximum = img.max()\n return np.array(img * (255 / maximum), dtype=np.uint8)",
"def clip_image(img, min=0, max=1):\n img[img < min] = min\n img[img > max] = max\n return img",
"def scale_ceil(value, old_max, new_max):\n\tassert value >= 0\n\tassert value <= old_max\n\treturn div_ceil(new_max * value, old_max)",
"def clip(val, val_min, val_max):\n return min(val_max, max(val_min, val))",
"def scale_img(img):\r\n # Scale values of img between 0 and 255.\r\n img -= np.amin(img)\r\n img /= np.amax(img)\r\n img *= 255\r\n return img",
"def normalize_minmax(data):\n _min = np.float(np.min(data))\n _max = np.float(np.max(data))\n if (_max-_min)!=0:\n img = (data - _min) / (_max-_min)\n else:\n img = np.zeros_like(data) \n return img",
"def scale(arrayin,Amin,Amax,mask=None):\r\n if (mask==None) and (arrayin.max() - arrayin.min())!=0.0 :\r\n Bmax = arrayin.max()\r\n Bmin = arrayin.min()\r\n elif (arrayin.max() - arrayin.min())!=0.0 :\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n Bmax = arrayin.min()\r\n Bmin = arrayin.max()\r\n for i in range(ny):\r\n for j in range(ny):\r\n if mask[i,j] > 0.5e0 :\r\n if arrayin[i,j] < Bmin :\r\n Bmin = arrayin[i,j]\r\n if arrayin[i,j] > Bmax :\r\n Bmax = arrayin[i,j]\r\n else :\r\n print \"andrew.bagOfns.scale : warning (arrayin.max() - arrayin.min())=0.0 \"\r\n return np.copy(arrayin)\r\n\r\n arrayout = (arrayin - Bmin)*(Amax - Amin) / (Bmax - Bmin) + Amin\r\n return arrayout",
"def stretch(array: np.ndarray, min: int = 0, max: int = 1) -> np.array:\n if max <= min:\n raise ValueError(\n f\"Max must be larger than min. Passed max of {max} was <= {min}\"\n )\n dtype_info = get_dtype_info(array.dtype)\n if max > dtype_info.max:\n raise ValueError(\n f\"Max of {max} was larger than the allowed datatype maximum of {dtype_info.max}\"\n )\n if min < dtype_info.min:\n raise ValueError(\n f\"Min of {min} was smaller than the allowed datatype minimum of {dtype_info.min}\"\n )\n\n return ground(normalize(ground(array)) * (max - min), value=min)",
"def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled",
"def rescale_data(data, mask, min_value=None, max_value=None):\n \n assert data.shape == mask.shape\n \n # Invert mask for masked array\n mask = mask == 0\n ma = np.ma.array(data, mask=mask)\n \n if not min_value:\n min_value = ma.min()\n if not max_value:\n max_value = ma.max()\n \n ma = (ma - min_value) / (max_value - min_value)\n data = ma.filled(0)\n return data",
"def scale_to_255(a, min, max, dtype=np.uint8):\n return (((a - min) / float(max - min)) * 255).astype(dtype)",
"def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)",
"def scaleImage(img, min):\n scales = []\n img_ = img.copy()\n h, w, c = img.shape\n factor = 0.8\n while h >= min and w >= min:\n img_ = cv2.resize(img_, (w, h))\n scales.append(img_)\n h = int(h * factor)\n w = int(w * factor)\n\n return scales",
"def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)",
"def clamp(value, min_value, max_value):\n return max(min_value, min(value, max_value))",
"def normalize_value_range(image,\n vmin = -1,\n vmax = 1,\n in_min = 0,\n in_max = 255.0,\n clip_values = False):\n if in_min >= in_max or vmin >= vmax:\n raise ValueError('min must be strictly less than max')\n\n in_min_t = tf.constant(in_min, tf.float32)\n in_max_t = tf.constant(in_max, tf.float32)\n image = tf.cast(image, tf.float32)\n image = (image - in_min_t) / (in_max_t - in_min_t)\n image = vmin + image * (vmax - vmin)\n if clip_values:\n image = tf.clip_by_value(image, vmin, vmax)\n return image",
"def window_standardize(img, lower_bound, upper_bound):\n img = np.clip(img, lower_bound, upper_bound)\n # x=x*2-1: map x to [-1,1]\n img = 2 * (img - lower_bound) / (upper_bound - lower_bound) - 1\n return img",
"def histogram_stretching(img):\n\n img_copy = np.copy(img)\n\n img_min = img_copy.min()\n img_max = img_copy.max()\n\n if img_min == img_max:\n return None\n\n img_copy = (img_copy-img_min)/(img_max-img_min) * 255\n\n return img_copy",
"def set_limits_minmax(self, zmin, zmax):\n self.pixels.set_clim(zmin, zmax)\n self.autoscale = False",
"def get_scaled_image(self, min_width=1024):\n self.dispersion = self.image.getCalibration().pixelWidth\n self.offset = self.image.getCalibration().xOrigin\n binning = 1\n while binning * min_width < self.image.getWidth():\n binning *= 2\n if binning > 1:\n binning /= 2\n IJ.run(self.image, 'Select None', '')\n new_image = self.image.crop()\n IJ.run(new_image, 'Bin...', 'x=%d y=%d binning=Average' %\n (binning, binning))\n self.dispersion *= binning\n self.offset /= binning\n self.image = new_image\n return new_image",
"def limit(val, arr):\n # Make copy\n new = np.array(val)\n extr = minmax(arr)\n # Enforce lower bound\n new = np.maximum(new, extr[0])\n # Enforce upper bound\n new = np.minimum(new, extr[1])\n return new",
"def scale_image(image, new_range):\n min_val = np.min(image).astype(np.float32)\n max_val = np.max(image).astype(np.float32)\n min_val_new = np.array(min(new_range), dtype=np.float32)\n max_val_new = np.array(max(new_range), dtype=np.float32)\n scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new\n return scaled_image.astype(np.uint8)",
"def scale(value,rawmin=100941, rawmax=274919, rangemin=0, rangemax=100):\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = float(value - rawmin) / float(rawmax - rawmin)\n\n # Convert the 0-1 range into a value in the right range.\n value = rangemin + (valueScaled * ((rangemax * 10) - rangemin))\n\n value = value // 10 * 10 // 10 # float to int\n\n return max(value, rangemin) # value must be greater or equal to rangemin",
"def SetPixelValueMinMax(self, min: 'float', max: 'float') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF2_SetPixelValueMinMax(self, min, max)"
] |
[
"0.7628654",
"0.64297724",
"0.6390369",
"0.63346624",
"0.6329962",
"0.62751615",
"0.6227718",
"0.61965084",
"0.61759627",
"0.61569077",
"0.6136462",
"0.6117476",
"0.6113362",
"0.6112044",
"0.6108288",
"0.6075168",
"0.6021573",
"0.6020029",
"0.6000976",
"0.59824467",
"0.5964238",
"0.5925614",
"0.58722204",
"0.58629686",
"0.5862312",
"0.58477783",
"0.5831546",
"0.58259916",
"0.5818715",
"0.5817621"
] |
0.8371912
|
0
|
Set the user and term in the form.
|
def get_form_kwargs(self, **kwargs):
    kwargs = super(ChallengeVerifyView, self).get_form_kwargs(**kwargs)
    kwargs['display_term'] = self.display_term
    kwargs['user'] = self.request.user
    return kwargs
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_form(self, request, *args, **kwargs):\n form = super().get_form(request, *args, **kwargs)\n form.base_fields['author'].initial = request.user\n return form",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super(TaxonomyCreateView, self).form_valid(form)",
"def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super(TaxonomyUpdateView, self).form_valid(form)",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form) # runs parent func",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form) # runs parent func",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)",
"def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)",
"def set_user(self, user):\r\n self.user = user",
"def set(self, username, password):\n\n # if there are no args, give them the form\n if not username and not password:\n return render('/login_form.html')\n\n # get the user by the username\n user_data = o.User.get_data(key=username)\n\n # no user data?\n if not user_data:\n add_flash('error','User not found')\n return render('/login_form.html')\n\n # check their password\n if not o.User.check_password(user_data,password):\n add_flash('error','Incorrect password')\n return render('/login_form.html')\n\n # set them as active user\n set_active_user(user_data.get('_hash'))",
"def form_valid(self, form):\n form.instance.human = self.request.user\n return super(CreateBookmark, self).form_valid(form)",
"def prefill(self, user):\n print('prefilling')\n self.username.data = user.username\n self.full_name.data = user.full_name\n self.email.data = user.email",
"def set_user_inputs(self):\n com_util.send_to(self.driver, element['clickOnEmail'], self.email)\n com_util.send_to(self.driver, element['clickOnPassword'], self.password)\n com_util.tap_on(self.driver, element['clickOnLogin'])",
"def form_valid(self, form):\n obj = form.save(commit=False)\n obj.user = self.request.user\n obj.save()\n return super().form_valid(form)",
"def terms(self, terms):\n\n self._terms = terms",
"def set_user(self, user):\n self._user = user",
"def enter_username(self):",
"def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs",
"def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs",
"def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs",
"def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]",
"def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n self.fields[\"groups\"].label = \"roles\"",
"def set_form(self, form):\n self.parameters = form",
"def user_set_default_license(request):\n dbsession = DBSession()\n\n user_id = request.matchdict['user_id']\n user = User.get_by_user_id(user_id)\n\n form = Form(request, schema=UserDefaultLicenseSchema, obj=user)\n\n if 'form.submitted' in request.POST and form.validate():\n request.session.flash('form validated!')\n\n return {\n 'the_user_id': user_id,\n 'the_username': user.username,\n 'form': FormRenderer(form),\n }",
"def setUp(self):\n self.user = User.objects.get(username='Aslan')\n self.user.save()\n self.setUpFormData()\n self.form = CompoundForm(self.user, self.formData)",
"def __init__(self, *args, **kwargs):\n super(AddEventForm, self).__init__(*args)\n\n if kwargs.get('current_user') is not None:\n self.fields['speakers'].initial = kwargs.get('current_user')\n\n self.fields['speakers'].label_from_instance = self.label_from_instance",
"def __init__(self, request=None, *args, **kwargs):\r\n self.request = request\r\n self.user_cache = None\r\n super(CustomizeAuthenticationForm, self).__init__(*args, **kwargs)\r\n\r\n # Set the label for the \"username\" field.\r\n UserModel = get_user_model()\r\n self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)\r\n if self.fields['username'].label is None:\r\n self.fields['username'].label = capfirst(self.username_field.verbose_name)",
"def register(userid, term_list):\n\n user_dn = 'uid=%s,%s' % (ldapi.escape(userid), cfg['ldap_users_base'])\n\n if type(term_list) in (str, unicode):\n term_list = [ term_list ]\n\n ldap_member = get(userid)\n if ldap_member and 'term' not in ldap_member:\n ldap_member['term'] = []\n\n if not ldap_member:\n raise NoSuchMember(userid)\n\n new_member = ldap_member.copy()\n new_member['term'] = new_member['term'][:]\n\n for term in term_list:\n\n # check term syntax\n if not re.match('^[wsf][0-9]{4}$', term):\n raise InvalidTerm(term)\n\n # add the term to the entry\n if not term in ldap_member['term']:\n new_member['term'].append(term)\n\n mlist = ldapi.make_modlist(ldap_member, new_member)\n ld.modify_s(user_dn, mlist)",
"def set_user_info(self, usrs):\r\n logger.info('Starting set user profile info')\r\n user = choice(usrs)\r\n self.title = user['title']\r\n self.fname = user['fname']\r\n self.lname = user['lname']\r\n self.email = user['email']\r\n self.password = user['password']\r\n self.dob = user['dob']\r\n self.company = user['company']\r\n self.address = user['address']\r\n self.city = user['city']\r\n self.postalcode = user['postalcode']\r\n self.phone = user['phone']\r\n logger.info('Ending set user profile info')"
] |
[
"0.5942573",
"0.59352493",
"0.58183414",
"0.5817644",
"0.5779533",
"0.5779533",
"0.5769205",
"0.5769205",
"0.5769205",
"0.5612393",
"0.55744946",
"0.55603635",
"0.554265",
"0.5522575",
"0.55176884",
"0.55171174",
"0.5514617",
"0.55015975",
"0.5490745",
"0.54737824",
"0.54737824",
"0.54698575",
"0.5436991",
"0.54084504",
"0.5391018",
"0.53873473",
"0.5382849",
"0.5369842",
"0.5360666",
"0.53518146"
] |
0.60840124
|
0
|
Initialize each form in the formset with a challenge.
|
def get_form(self, form_class):
    formset = super(ChallengeVerifyView, self).get_form(form_class)
    challenges = Challenge.objects.select_related(
        'candidate__user__userprofile', 'challenge_type').filter(
        verifying_user=self.request.user, candidate__term=self.display_term)
    for i, challenge in enumerate(challenges):
        formset[i].instance = challenge
        formset[i].initial = {
            'verified': challenges[i].verified,
            'reason': challenges[i].reason}
    return formset
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def initial_formset_data(self, request, step, formset):\n return None",
"def initial_form_data(self, request, step, form):\n return None",
"def setUpFormData(self):\n self.formData = {'labGroup': '5', 'abbrev': 'etoh', 'name': 'ethanol', 'CAS_ID': '64-17-5', 'CSID': '682',\n 'chemicalClasses': [ChemicalClass.objects.get(label='Solv').pk]}",
"def test_view_initializes_formset_with_audit_initial_data(self):\n audit = AuditFactory(num_doctypes=3) # fixture\n\n formset = DocumentFormSet(audit_pk=audit.pk)\n\n expected_labels = {dt.name for dt in audit.required_doctypes.all()}\n forms_labels = {form.fields['file'].label for form in formset}\n self.assertSetEqual(expected_labels, forms_labels)\n\n expected_doctype_pks = {dt.pk for dt in audit.required_doctypes.all()}\n forms_pks = {form.initial['doctype'] for form in formset}\n self.assertSetEqual(expected_doctype_pks, forms_pks)",
"def test_missing_initial_fieldsets(self):\n original_initial_fieldsets = self.form.fieldsets\n print(\"========================= TEST UNABLE TO DELETE THE PROPERTY FOR TESTING ========================\")\n print(original_initial_fieldsets)\n print(\"--------------------------------------\")\n delattr(self.form, 'fieldsets')\n response_fieldsets = self.form.make_fieldsets()\n print(response_fieldsets)\n\n setattr(self.form, 'fieldsets', original_initial_fieldsets)",
"def get_setup_forms(self, wizard):\n return {} # pragma: no cover",
"def __init__(self):\n self.constant_fields = {}\n self.post_score_renames = {}\n self.form = None\n self.form_field_regex = None\n self.field_count = None\n\n self.set_generic_fields()\n self.set_specific_fields()\n self.set_post_score_renames()",
"def process_show_formset(self, request, step, formset):\n pass",
"def _construct_form(self, i, **kwargs):\n form = super(NestedFormSet, self)._construct_form(i, **kwargs)\n form.empty_permitted = False\n return form",
"def formset_factory(leafpack=None, taxon_forms=None): # type: (LeafPack, [LeafPackBugForm]) -> list\n form_list = []\n\n queryset = Macroinvertebrate.objects.filter(family_of=None).order_by('sort_priority')\n\n for taxon in queryset:\n if leafpack is not None:\n try:\n lpg = LeafPackBug.objects.get(bug=taxon.id, leaf_pack=leafpack.id)\n except ObjectDoesNotExist:\n lpg = LeafPackBug(bug=taxon, leaf_pack=leafpack)\n lpg.save()\n else:\n lpg = LeafPackBug(bug=taxon)\n\n order_bug_form = LeafPackBugForm(instance=lpg)\n\n families = taxon.families.all().order_by('common_name').order_by('sort_priority')\n family_bug_forms = list()\n if len(families) > 0:\n\n if leafpack is not None:\n\n child_taxons = []\n for child_bug in families:\n\n try:\n child_lpg = LeafPackBug.objects.get(bug=child_bug, leaf_pack=leafpack)\n except ObjectDoesNotExist:\n child_lpg = LeafPackBug(bug=child_bug, leaf_pack=leafpack)\n child_lpg.save()\n\n child_taxons.append(child_lpg)\n\n family_bug_forms = [LeafPackBugForm(instance=taxon) for taxon in child_taxons]\n else:\n family_bug_forms = [LeafPackBugForm(instance=LeafPackBug(bug=bug)) for bug in families]\n\n form_list.append((order_bug_form, family_bug_forms))\n\n # If taxon_forms is not None, plug bug_count values into new formset\n if taxon_forms is not None:\n\n def get_taxon_count(taxon_):\n for tf in taxon_forms:\n if tf.instance.bug == taxon_:\n return tf.instance.bug_count\n return 0\n\n for forms_ in taxon_forms:\n forms_[0].initial['bug_count'] = get_taxon_count(forms_[0].instance.bug)\n\n for form in forms_[1]:\n form.initial['bug_count'] = get_taxon_count(form.instance.bug)\n\n return form_list",
"def __init__(self,\n quiz_size_slug=Quiz.DEFAULT_QUIZ_SIZE_SLUG,\n *args, **kwargs):\n super(QuizForm, self).__init__(*args, **kwargs)\n quiz_json = QuizJson()\n question_count = Quiz.get_question_count_for_slug(quiz_size_slug)\n self.question_count = question_count\n\n for question_no in range(0, question_count):\n question_no_str = str(question_no)\n question_no_2_chars = question_no_str.zfill(2)\n question_key = 'question_' + question_no_2_chars\n form_question_no_str = str(question_no + 1)\n question_text = quiz_json.get_question_text(question_no)\n label = form_question_no_str + '. ' + question_text\n radio_widget = forms.RadioSelect(attrs={'class': 'quiz_answer'})\n choices = quiz_json.get_choices(question_no)\n self.fields[question_key] = forms.ChoiceField(\n widget=radio_widget, label=label, choices=choices\n )",
"def _construct_form(self, i, **kwargs):\n defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}\n if self.is_bound:\n defaults['data'] = self.data\n defaults['files'] = self.files\n if self.initial:\n try:\n defaults['initial'] = self.initial[i]\n except IndexError:\n pass\n # Allow extra forms to be empty.\n if i >= self.initial_form_count():\n defaults['empty_permitted'] = True\n defaults.update(kwargs)\n form = self.form(self.params[len(self.params) - i - 1][1], self.params[len(self.params) - i - 1][0], i, **defaults) #passando o params[i] para o form[i]\n self.add_fields(form, i)\n return form",
"def grouped_formset_factory(leafpack=None, taxon_forms=None):\n groupedform_list = []\n groupRS= LeafPackSensitivityGroup.objects.all()\n\n for gr in groupRS:\n\n groupRS = Macroinvertebrate.objects.filter(displayflag= True, sens_group=gr).order_by('display_order')\n bug_forms=[]\n for taxon in groupRS:\n if leafpack is not None:\n try:\n lpg = LeafPackBug.objects.get(bug=taxon.id, leaf_pack=leafpack.id)\n except ObjectDoesNotExist:\n lpg = LeafPackBug(bug=taxon, leaf_pack=leafpack)\n lpg.save()\n else:\n lpg = LeafPackBug(bug=taxon)\n \n bug_form = LeafPackBugForm(instance=lpg)\n bug_forms.append(bug_form)\n \n group ={}\n group['name']= 'Group {0}: {1}'.format(str(gr.id), gr.name)\n group['list']= bug_forms\n groupedform_list.append(group)\n\n # If taxon_forms is not None, plug bug_count values into new formset\n if taxon_forms is not None:\n\n def get_taxon_count(taxon_):\n for tf in taxon_forms:\n if tf.instance.bug == taxon_:\n return tf.instance.bug_count\n return 0\n\n for forms_ in taxon_forms:\n forms_[0].initial['bug_count'] = get_taxon_count(forms_[0].instance.bug)\n\n for form in forms_[1]:\n form.initial['bug_count'] = get_taxon_count(form.instance.bug)\n\n return groupedform_list",
"def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data",
"def prepare(self, form):\n \n return form",
"def _create_and_initialise_fields(self):\n for team in self.category.breaking_teams.all():\n self.fields[self._fieldname_remark(team)] = OptionalChoiceField(choices=BreakingTeam.REMARK_CHOICES, required=False)\n try:\n self.initial[self._fieldname_remark(team)] = self._bt(team).remark\n except KeyError:\n self.initial[self._fieldname_remark(team)] = None",
"def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)",
"def setUp(self):\n self.user = User.objects.get(username='Aslan')\n self.user.save()\n self.setUpFormData()\n self.form = CompoundForm(self.user, self.formData)",
"def gen_formset(object_name, simulation, request=None):\n formset = None\n query = get_query(object_name, simulation)\n if object_name == 'centroid':\n if request:\n if query.exists():\n formset = CentroidFormSet(\n request.POST,\n prefix='centroid',\n simulation=simulation,\n )\n else:\n formset = CentroidFormSetExtra(\n request.POST,\n prefix='centroid',\n simulation=simulation,\n )\n else:\n if query.exists():\n formset = CentroidFormSet(\n queryset=query,\n prefix='centroid',\n simulation=simulation,\n )\n else:\n formset = CentroidFormSetExtra(\n queryset=query,\n prefix='centroid',\n simulation=simulation,\n )\n elif object_name == 'crossing':\n if request:\n if query.exists():\n formset = CrossingFormSet(\n request.POST,\n prefix='crossing',\n simulation=simulation,\n )\n else:\n formset = CrossingFormSetExtra(\n request.POST,\n prefix='crossing',\n simulation=simulation,\n )\n else:\n if query.exists():\n formset = CrossingFormSet(\n queryset=query,\n prefix='crossing',\n simulation=simulation,\n )\n else:\n formset = CrossingFormSetExtra(\n queryset=query,\n prefix='crossing',\n simulation=simulation,\n )\n elif object_name == 'link':\n if request:\n if query.exists():\n formset = LinkFormSet(\n request.POST,\n prefix='link',\n simulation=simulation,\n form_kwargs={'simulation': simulation},\n )\n else:\n formset = LinkFormSetExtra(\n request.POST,\n prefix='link',\n simulation=simulation,\n form_kwargs={'simulation': simulation},\n )\n else:\n if query.exists():\n formset = LinkFormSet(\n queryset=query,\n prefix='link',\n simulation=simulation,\n form_kwargs={'simulation': simulation}\n )\n else:\n formset = LinkFormSetExtra(\n queryset=query,\n prefix='link',\n simulation=simulation,\n form_kwargs={'simulation': simulation}\n )\n elif object_name == 'function':\n if request:\n if query.exists():\n formset = FunctionFormSet(\n request.POST,\n prefix='function',\n simulation=simulation,\n )\n else:\n formset = FunctionFormSetExtra(\n request.POST,\n prefix='function',\n simulation=simulation,\n )\n else:\n if query.exists():\n formset = FunctionFormSet(\n queryset=query,\n prefix='function',\n simulation=simulation,\n )\n else:\n formset = FunctionFormSetExtra(\n queryset=query,\n prefix='function',\n simulation=simulation,\n )\n return formset",
"def initialize_survey(self, **kwargs):",
"def test_make_fieldsets_saves_results(self):\n original_initial_fieldsets = getattr(self.form, 'fieldsets', None)\n initial_fieldsets = deepcopy(original_initial_fieldsets)\n original_computed_fieldsets = getattr(self.form, '_fieldsets', None)\n original_summary = getattr(self.form, '_fs_summary', None)\n self.assertIsNotNone(original_initial_fieldsets)\n self.assertIsNone(original_computed_fieldsets)\n self.assertIsNone(original_summary)\n response_fieldsets = self.form.make_fieldsets()\n label, summary = response_fieldsets.pop()\n self.assertIsNotNone(self.form._fieldsets)\n self.assertIsNotNone(self.form._fs_summary)\n self.assertEqual('summary', label)\n self.assertEqual(summary, self.form._fs_summary)\n self.assertEqual(response_fieldsets, self.form._fieldsets)\n self.assertEqual(initial_fieldsets, self.form.fieldsets)\n\n self.form.fieldsets = original_initial_fieldsets\n self.form._fieldsets = original_computed_fieldsets\n self.form._fs_summary = original_summary\n if original_computed_fieldsets is None:\n del self.form._fieldsets\n if original_summary is None:\n del self.form._fs_summary",
"def get_form_initial(self, step):\n initial_data = self.initial_dict.get(step, {}) # initial data of current step\n\n # SimulationForm0_LoadPdb\n step_0_prev_data = self.storage.get_step_data('0')\n step_0_prev_data = {} if step_0_prev_data is None \\\n else {'num_of_proteins': step_0_prev_data.get('0-num_of_proteins'),\n 'first_pdb_type': step_0_prev_data.get('0-first_pdb_type'),\n 'first_pdb_id': step_0_prev_data.get('0-first_pdb_id'),\n 'first_pdb_file': step_0_prev_data.get('0-first_pdb_file'),\n 'second_pdb_type': step_0_prev_data.get('0-second_pdb_type'),\n 'second_pdb_id': step_0_prev_data.get('0-second_pdb_id'),\n 'second_pdb_file': step_0_prev_data.get('0-second_pdb_file'),\n 'user_rand': step_0_prev_data.get('0-user_rand')}\n\n # SimulationForm1_DetermineRelativePosition\n step_1_prev_data = self.storage.get_step_data('1')\n step_1_prev_data = {} if step_1_prev_data is None \\\n else {'x1': step_1_prev_data.get('1-x1'), 'x2': step_1_prev_data.get('1-x2'),\n 'y1': step_1_prev_data.get('1-y1'), 'y2': step_1_prev_data.get('1-y2'),\n 'z1': step_1_prev_data.get('1-z1'), 'z2': step_1_prev_data.get('1-z2'),\n 'degXY_1': step_1_prev_data.get('1-degXY_1'), 'degYZ_1': step_1_prev_data.get('1-degYZ_1'),\n 'degXY_2': step_1_prev_data.get('1-degXY_2'), 'degYZ_2': step_1_prev_data.get('1-degYZ_2')}\n\n # SimulationForm2_SimulationParameters\n step_2_prev_data = self.storage.get_step_data('2')\n step_2_prev_data = {} if step_2_prev_data is None \\\n else {'temperature_scale': step_2_prev_data.get('2-temperature_scale'),\n 'temperature': step_2_prev_data.get('2-temperature'),\n 'time_step_number': step_2_prev_data.get('2-time_step_number')}\n\n update_data = {**step_0_prev_data, **step_1_prev_data, **step_2_prev_data, **initial_data}\n return self.initial_dict.get(step, update_data)",
"def test_build_forms_from_questionnaire(self):\n self.view._build_forms_from_questionnaire()\n self.assertIsInstance(self.view.form_list, types.DictType)\n self.assertIs(self.view.form_list.get(str(len(self.view.form_list) - 1)), TempLanguageForm)",
"def test_contains_forms(self):\n response = self.client.get(self.url)\n \n first_author_form = response.context.get('first_author_form')\n authors_fs = response.context.get('authors_fs')\n book_form = response.context.get('book_form')\n language_form = response.context.get('language_form')\n \n self.assertIsInstance(first_author_form, AuthorForm)\n self.assertIsInstance(authors_fs, AuthorFormSet)\n self.assertIsInstance(book_form, BookForm)\n self.assertIsInstance(language_form, LanguageForm)",
"def get_formset_form(formset: forms.BaseFormSet) -> forms.BaseForm:\n return formset._construct_form(0, **formset.get_form_kwargs(0)) # type: ignore",
"def service_questions(request, index_card_id, hazard_id, index_card_service_id):\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n index_card = IndexCard.objects.get(id=index_card_id)\n index_card_service = index_card.indexcardservice_set.get(id=index_card_service_id)\n hazard = Hazard.objects.get(id=hazard_id)\n service_component_list = index_card_service.service.servicecomponent_set.all()\n service_component_list_count = len(service_component_list)\n\n if service_component_list_count == 1:\n QuestionFormSet = modelformset_factory(ServiceT1Question, max_num=1, exclude=[])\n if service_component_list_count == 2:\n QuestionFormSet = modelformset_factory(ServiceT2Question, max_num=1, exclude=[])\n if service_component_list_count == 3:\n QuestionFormSet = modelformset_factory(ServiceT3Question, max_num=1, exclude=[])\n if service_component_list_count == 4:\n QuestionFormSet = modelformset_factory(ServiceT4Question, max_num=1, exclude=[])\n if service_component_list_count == 5:\n QuestionFormSet = modelformset_factory(ServiceT5Question, max_num=1, exclude=[])\n\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/services_list.html\",\n {'index_card': index_card,\n 'hazard': hazard, 'service_inf_elements':service_inf_elements,\n 'hard_inf_elements':hard_inf_elements, 'built_env_elements':built_env_elements},\\\n context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n else:\n if service_component_list_count == 1:\n query_set = ServiceT1Question.objects.filter(index_card_service_id=index_card_service_id)\n template_name = \"crppindexcard/service_t1_questions.html\"\n\n\n if service_component_list_count == 2:\n query_set = ServiceT2Question.objects.filter(index_card_service_id=index_card_service_id)\n template_name = \"crppindexcard/service_t2_questions.html\"\n\n if service_component_list_count == 3:\n query_set = ServiceT3Question.objects.filter(index_card_service_id=index_card_service_id)\n template_name = \"crppindexcard/service_t3_questions.html\"\n\n if service_component_list_count == 4:\n query_set = ServiceT4Question.objects.filter(index_card_service_id=index_card_service_id)\n template_name = \"crppindexcard/service_t4_questions.html\"\n\n if service_component_list_count == 5:\n query_set = ServiceT5Question.objects.filter(index_card_service_id=index_card_service_id)\n template_name = \"crppindexcard/service_t5_questions.html\"\n\n formset = QuestionFormSet(queryset=query_set)\n return render_to_response(template_name,\n dict(formset=formset, index_card=index_card, hazard=hazard, service=index_card_service.service, \\\n constants=crppindexcard.constants), context_instance=RequestContext(request))",
"def bind_formset(formset):\n if formset.is_bound:\n # do nothing if the formset is already bound\n return formset\n \n bindData={}\n # the formset.get_default_prefix() and form.add_prefix() methods add in the \n # dict keys that uniquely identify the various form fields with the individual \n # instance data\n \n # add formset management form data\n bindData[formset.get_default_prefix()+\"-TOTAL_FORMS\"]=str(formset.management_form['TOTAL_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-INITIAL_FORMS\"]=str(formset.management_form['INITIAL_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-MIN_NUM_FORMS\"]=str(formset.management_form['MIN_NUM_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-MAX_NUM_FORMS\"]=str(formset.management_form['MAX_NUM_FORMS'].value())\n for form in formset:\n if form.instance:\n # field data, get these values from the instance\n for fieldName,fieldValue in form.fields.iteritems():\n try:\n bindData[form.add_prefix(fieldName)]=getattr(form.instance,\n fieldName)\n except AttributeError:\n # this is an added field (i.e. DELETE), not derived from the\n # model, do nothing with it, since we are only binding instance\n # data to the form\n pass\n # hidden field data, get these from the field initial values set\n # when the form was created\n for field in form.hidden_fields():\n bindData[form.add_prefix(field.name)]=field.field.initial\n # create a new bound formset by passing in the bindData dict, this looks\n # to the formset constructor like a request.POST dict \n newFormset=formset.__class__(bindData,instance=formset.instance,\n error_class=formset.error_class)\n return newFormset",
"def get_form(self):\n # setup request layer\n self.request = TestRequest()\n # get add view\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newSpeciesDistribution\")\n # update the form once to initialise all widgets\n form.update()\n # go through all widgets on the form and update the request with default values\n data = {}\n for widget in chain(\n form.widgets.values(),\n # skip standard plone groups\n #chain.from_iterable(g.widgets.values() for g in form.groups),\n chain.from_iterable(g.widgets.values() for g in form.param_groups)):\n data[widget.name] = widget.value\n data.update({\n 'form.widgets.IDublinCore.title': u\"My Experiment\",\n 'form.widgets.IDublinCore.description': u'This is my experiment description',\n 'form.widgets.functions': [self.algorithm.UID()], # BIOCLIM\n 'form.widgets.species_occurrence_dataset': [unicode(self.occur.UID())], # ABT\n 'form.widgets.species_absence_dataset': [unicode(self.absen.UID())],\n 'form.widgets.species_pseudo_absence_points': [],\n 'form.widgets.resolution': ('Resolution2_5m', ),\n # FIXME: shouldn't be necessary to use unicode here,... widget converter should take care of it\n 'form.widgets.environmental_datasets.item.0': unicode(self.current.UID()),\n 'form.widgets.environmental_datasets.item.0.item': [u'B01'],\n 'form.widgets.environmental_datasets.item.1': unicode(self.current.UID()),\n 'form.widgets.environmental_datasets.item.1.item': [u'B02'],\n 'form.widgets.environmental_datasets.count': '2',\n })\n self.request.form.update(data)\n form = getMultiAdapter((self.experiments, self.request),\n name=\"newSpeciesDistribution\")\n return form",
"def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True",
"def section_questions(request, index_card_id, section_id):\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n fields_to_show = \\\n ('statement','answer_type', 'answer_short', 'answer_long', 'comments')\n read_only_fields = ('comments')\n\n QuestionFormSet = modelformset_factory(Question, max_num=1, exclude=[])\n index_card = IndexCard.objects.get(id=index_card_id)\n section = index_card.section_set.get(id = section_id)\n\n if request.method == 'POST':\n formset = QuestionFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save()\n return render_to_response(\"crppindexcard/index.html\",\n {'index_card': index_card,\n \"service_inf_elements\": service_inf_elements,\n 'hard_inf_elements':hard_inf_elements,'built_env_elements':built_env_elements},\\\n context_instance=RequestContext(request))\n else:\n if format(len(formset.errors) > 0):\n num_errors = len(formset.errors[0])\n set_form_hidden_fields(formset, fields_to_show)\n set_form_readonly_fields(formset, read_only_fields)\n set_form_country_select(formset)\n else:\n query_set = Question.objects.filter(section_id = section_id).order_by('id')\n formset = QuestionFormSet(queryset=query_set)\n set_form_readonly_fields(formset, read_only_fields)\n set_form_hidden_fields(formset, fields_to_show)\n set_form_country_select(formset)\n\n return render_to_response(\"crppindexcard/section_questions.html\", {\n \"formset\": formset,\n \"index_card\": index_card,\n \"section\": section,\n }, context_instance=RequestContext(request))"
] |
[
"0.6577104",
"0.6018372",
"0.59315926",
"0.5917518",
"0.5716115",
"0.56829953",
"0.56514114",
"0.5570535",
"0.5529638",
"0.5509667",
"0.54883474",
"0.5474722",
"0.5455359",
"0.5445691",
"0.54168105",
"0.5375306",
"0.5359101",
"0.530911",
"0.5278513",
"0.5270086",
"0.5230901",
"0.5226786",
"0.5220012",
"0.52113265",
"0.5207074",
"0.5183671",
"0.5142951",
"0.51414734",
"0.51412255",
"0.5137204"
] |
0.6970292
|
0
|
Helper method that returns a dictionary containing a requirement name and a form, to be used for each requirement in the template.
|
def get_entry(name, req, form):
    entry = {'requirement': name, 'form': form}
    form.initial['credits_needed'] = 0
    if req:
        form.instance = req
        form.initial['credits_needed'] = req.credits_needed
    return entry
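A small, hypothetical sketch of how the helper above behaves, assuming `get_entry` as defined above is in scope; `_StubForm` and `_StubRequirement` (and the requirement names used) are illustrative stand-ins, not the project's real form and requirement classes:

class _StubForm:
    def __init__(self):
        self.initial = {}
        self.instance = None

class _StubRequirement:
    def __init__(self, credits_needed):
        self.credits_needed = credits_needed

# With an existing requirement, the form is bound to it and pre-filled.
entry = get_entry('Meetings', _StubRequirement(credits_needed=3), _StubForm())
assert entry['form'].instance is not None
assert entry['form'].initial['credits_needed'] == 3

# Without one, the form simply defaults to 0 credits needed.
entry = get_entry('Socials', None, _StubForm())
assert entry['form'].initial['credits_needed'] == 0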
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def render_form(form):\n return {\n 'form': form,\n }",
"def _template_data(self):\n return {\"form\": self.form.render()}",
"def show_fieldsetform(form):\n return {'form': form, 'required_fields': True}",
"def forms(self):\n filter = self.filter_form\n search = self.search_form\n return {\n 'filter': filter,\n 'search': search\n }",
"def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data",
"def get_form_kwargs(self, i):\n return dict(request=self.request)",
"def fields(self, required=False):\n form = h.simplsale_form(self._index_xml)\n if required:\n required = '.required'\n else:\n required = ''\n elements = CSSSelector('input[type!=\"submit\"]%s, select%s'\n % (required, required))(form)\n names = []\n for e in elements:\n name = e.attrib.get('name', None)\n if name is not None:\n names.append(name)\n if 'billing_amount' in names and not required:\n names.extend(['billing_amount_price', 'billing_amount_name'])\n d = dict((key, '') for key in names)\n return d",
"def application_form():\n#def application_form(firstname,lastname,job,salary):\n\n #form = request.form\n #print form\n firstname = request.args.get(\"firstname\")\n lastname = request.args.get(\"lastname\")\n job = request.args.get(\"job\")\n salary = request.args.get(\"salary\")\n\n return render_template(\"./application-response.html\",\n firstname=firstname,\n lastname=lastname,\n job=job,\n salary=salary)",
"def forms(self):\r\n forms = FormsDict()\r\n for name, item in self.POST.iterallitems():\r\n if not hasattr(item, 'filename'):\r\n forms[name] = item\r\n return forms",
"def render_creation_form(request: Request):\n return templates.TemplateResponse(\"creation_form.html\",{'request': request})",
"def get_setup_forms(self, wizard):\n return {} # pragma: no cover",
"def render_form():",
"def make_fields(self):\n #Let's first get fields in material_information printer_information\n metadata = GUI.api.get_metadata()\n field_correct_form = filter(lambda field: field['form_name']=='material_information' or field['form_name'] == 'printer_information', metadata)\n rows_w_fields = []\n for field in field_correct_form:\n #make label\n row = []\n key = field['field_name']\n type = field['field_type']\n row.append(sg.Text(text = field['field_label'], key=key+\"_label\"))#keys for labels are key_label (ex. record_id_label)\n if(type == 'radio' or type == \"dropdown\"):\n options = utils.get_options(field)\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"yesno\"):\n options = [\"Yes\", \"No\"]\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"text\"):\n row.append(sg.Input(key=key, disabled=True, metadata=True))\n else:#descirptive\n row[0] = sg.Text(text = field['field_label'], key=key, metadata=True)#we only need text in this case\n rows_w_fields.append(row)\n return rows_w_fields",
"def describe_invalid_form(form):\n return dict((i.name, i.note) for i in form.inputs if i.note is not None)",
"def getForms(self, page):\n\n forms = HTTP.FORM_REGEX.findall(self.get[page]['data'])\n self.get[page]['forms'] = []\n for params, content in forms:\n form = {}\n\n form = dict(HTTP.PARAM_REGEX.findall(params))\n\n # Parsing regular inputs\n inputs = HTTP.INPUT_REGEX.findall(content)\n form['inputs'] = {}\n for e in inputs:\n params = dict(HTTP.PARAM_REGEX.findall(e))\n\n try: name = params['name']\n except: name = params['type']\n\n form['inputs'][name] = params\n if 'required' in params: form['inputs'][name]['required'] = True\n\n # Parsing text areas\n txtareas = HTTP.TXTAREA_REGEX.findall(content)\n for txtarea in txtareas:\n params = dict(HTTP.PARAM_REGEX.findall(txtarea))\n\n form['inputs'][params['name']] = params\n form['inputs'][params['name']]['type'] = 'textarea'\n if 'required' in params: form['inputs'][params['name']]['required'] = True\n\n # Parsing select inputs\n selects = HTTP.SELECT_REGEX.findall(content)\n for params, value in selects:\n params = dict(HTTP.PARAM_REGEX.findall(params))\n\n form['inputs'][params['name']] = params\n form['inputs'][params['name']]['type'] = 'select'\n form['inputs'][params['name']]['value'] = value\n if 'required' in params: form['inputs'][params['name']]['required'] = True\n\n\n self.get[page]['forms'].append(form)\n return self.get[page]['forms']",
"def form_GranularFormLayout(request):\n\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String(title='First Name', \\\n description='The name before your last one', \\\n validator=validatish.Required()) )\n\n form = formish.Form(schema, 'form')\n\n return form",
"def show_sendform(form):\n return {'form': form, 'required_fields': True}",
"def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values",
"def crear():\n\n return dict(form=form)",
"def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }",
"def dict_form1(fac_data):\n #sort through last names\n #count number last names? maybe not\n #create key for each last name\n #if key exists, add list of values [degree, title -'of biostatistics', email]\n \n form_dict = {}\n\n for i in fac_data:\n #get name\n split_name = i['name'].split(\" \")\n last_name = split_name[len(split_name)-1]\n \n #build array of degree/title/email\n fixed_title = i[' title'].strip(\" of Biostatistics\")\n \n info = []\n info.append(i[' degree'])\n info.append(fixed_title)\n info.append(i[' email'])\n \n #add to dictionary\n if last_name in form_dict:\n form_dict[last_name].append([info])\n else:\n form_dict[last_name] = info\n\n return form_dict",
"def extract_data_from_form(\n self, data: JobForm, many: bool, **kwargs\n ) -> Dict[str, Any]:\n\n def slugify(text: str) -> str:\n return text.lower().strip().replace(\" \", \"-\")\n\n return {\n \"experiment_name\": slugify(data.experiment_name.data),\n \"queue\": slugify(data.queue.data),\n \"timeout\": data.timeout.data or None,\n \"entry_point\": data.entry_point.data,\n \"entry_point_kwargs\": data.entry_point_kwargs.data or None,\n \"depends_on\": data.depends_on.data or None,\n \"workflow\": data.workflow.data,\n }",
"def create_report(self, form_data):\n R = {}\n for e in self.entries:\n R[e.name] = e.get_value(form_data)\n \n return(R)",
"def req():\n\n if not current.auth.s3_logged_in():\n return None\n\n ADMIN = current.session.s3.system_roles.ADMIN\n settings = current.deployment_settings\n types = settings.get_req_req_type()\n\n get_vars = {}\n if len(types) == 1:\n t = types[0]\n if t == \"Stock\":\n get_vars = {\"type\": \"1\"}\n elif t == \"People\":\n get_vars = {\"type\": \"2\"}\n create_menu = M(\"Create\", m=\"create\", vars=get_vars)\n\n recurring = lambda i: settings.get_req_recurring()\n use_commit = lambda i: settings.get_req_use_commit()\n req_items = lambda i: \"Stock\" in types\n req_skills = lambda i: \"People\" in types\n\n return M(c=\"req\")(\n M(\"Current Needs\", f=\"organisation_needs\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\", restrict=[ADMIN]),\n ),\n M(\"Needs at Facilities\", f=\"site_needs\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Requests\", f=\"req\", vars=get_vars)(\n create_menu,\n M(\"List Recurring Requests\", f=\"req_template\", check=recurring),\n M(\"Map\", m=\"map\"),\n M(\"Report\", m=\"report\"),\n M(\"Search All Requested Items\", f=\"req_item\",\n check=req_items),\n M(\"Search All Requested Skills\", f=\"req_skill\",\n check=req_skills),\n ),\n M(\"Commitments\", f=\"commit\", check=use_commit)(\n ),\n M(\"Items\", c=\"supply\", f=\"item\")(\n M(\"Create\", m=\"create\"),\n M(\"Report\", m=\"report\"),\n M(\"Import\", m=\"import\", p=\"create\"),\n ),\n # Catalog Items moved to be next to the Item Categories\n #M(\"Catalog Items\", c=\"supply\", f=\"catalog_item\")(\n #M(\"Create\", m=\"create\"),\n #),\n M(\"Catalogs\", c=\"supply\", f=\"catalog\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Item Categories\", c=\"supply\", f=\"item_category\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )",
"def to_dict(self):\n result = {\n 'name': self.__class__.__name__,\n 'params': {\n 'requirements': {f: req for f, req in self.requirements.items()}\n }\n }\n\n return result",
"def get_requirement_info():\n links, requirements = [], []\n info = {'dependency_links': links, 'install_requires': requirements}\n requirements_path = 'requirements.txt'\n\n if not os.path.isfile(requirements_path):\n print('requirements.txt not found. Did you forget it?')\n return info\n\n reqs = filter(None, map(str.strip, open(requirements_path)))\n for line in reqs:\n if is_http(line):\n i = line.find('#egg=')\n if i == -1:\n raise SetupError('Missing \\'#egg=\\' in requirement link.')\n links.append(line[:i])\n requirements.append(line[i+5:])\n else:\n requirements.append(line)\n return info",
"def get_requirements(package):\n requirements: list = requires(package)\n requires_dict = defaultdict(dict)\n for requirement in requirements:\n req = Requirement(requirement)\n package_name, package_marker = req.name, req.marker\n if package_marker and \"extra ==\" in str(package_marker):\n group = str(package_marker).split(\"extra == \")[1].strip('\"').strip(\"'\").strip()\n else:\n group = \"required\"\n # De-duplicate (the same package could appear more than once in the extra == 'all' group)\n if package_name in requires_dict[group]:\n continue\n requires_dict[group][package_name] = req\n return requires_dict",
"def createFormatMap(self, form, renderable, **extras):\n\n fmtmap = renderable.__dict__.copy()\n fmtmap.update(extras)\n\n def replaceVars(match):\n\n try:\n var = match.group()[2:-1]\n if var and var.endswith(\":lexical\"):\n var = var[:-len(\":lexical\")]\n value = form.getFieldValue(var, lexical=True) or ''\n else:\n value = form.getFieldValue(var) or ''\n\n if not isinstance(value, str):\n if not hasattr(value, \"decode\"):\n value = str(value)\n value = value.decode('utf-8')\n return value\n except:\n return match.group()\n\n # process labels and hints\n if 'label' in fmtmap and fmtmap['label'] != None:\n fmtmap['label'] = VAREXP.sub(replaceVars, fmtmap['label'])\n if 'hint' in fmtmap and fmtmap['hint'] != None:\n fmtmap['hint'] = VAREXP.sub(replaceVars, fmtmap['hint'])\n if 'text' in fmtmap and fmtmap['text'] != None:\n fmtmap['text'] = VAREXP.sub(replaceVars, fmtmap['text'])\n if 'placeholder' in fmtmap and fmtmap['placeholder'] != None:\n fmtmap['placeholder'] = VAREXP.sub(replaceVars,\n fmtmap['placeholder'])\n\n # defaults\n extra_classes = {'relevant': True, 'required': False,\n 'readonly': False, 'error': False}\n\n # Let's see whether we got properties here...\n try:\n if hasattr(renderable, 'bind') and renderable.bind:\n # Requiredness\n if form.model.isRequired(renderable.bind, form.data):\n extra_classes[\"required\"] = True\n\n if not form.model.isRelevant(renderable.bind, form.data):\n extra_classes[\"relevant\"] = False\n\n # Read only\n if form.model.isReadonly(renderable.bind, form.data):\n extra_classes[\"readonly\"] = True\n\n elif hasattr(renderable, 'getRenderables') and \\\n callable(renderable.getRenderables):\n\n # Group relevance\n if not form.model.isGroupRelevant(renderable, form.data):\n extra_classes[\"relevant\"] = False\n\n except:\n pass\n\n if extras.get(\"errors\", None) and \\\n hasattr(renderable, 'bind') and renderable.bind and \\\n extras['errors'].get(renderable.bind, None):\n\n extra_classes['error'] = True\n\n if getattr(renderable, 'alert', ''):\n fmtmap['alert'] = renderable.alert\n else:\n fmtmap['alert'] = \"; \".join(extras['errors'][renderable.bind])\n\n else:\n\n fmtmap['alert'] = ''\n\n if \"extra_classes\" in fmtmap:\n fmtmap['extra_classes'] = \" \".join([fmtmap['extra_classes']] + \\\n [key for key in\n list(extra_classes.keys())\n if extra_classes[key]])\n else:\n fmtmap['extra_classes'] = \" \".join([key for key in\n list(extra_classes.keys()) if\n extra_classes[key]])\n\n fmtmap['type'] = self.getType(renderable)\n\n return fmtmap",
"def form_errors(form):\n errors = {}\n max_name_length = Item.name.property.columns[0].type.length\n if not form.get('name', None):\n errors['name'] = 'Please enter a name.'\n elif len(form['name']) > max_name_length:\n errors['name'] = (\n 'Name must be less than %s characters.' % max_name_length\n )\n if not Catagory.exists(form.get('catagory', None)):\n errors['catagory'] = 'Not a valid catagory.'\n if not form.get('description', None):\n errors['description'] = 'Please enter a description.'\n return errors",
"def show_fieldsetform_nrf(form):\n return {'form': form, 'required_fields': False}"
] |
[
"0.630402",
"0.61594725",
"0.6050221",
"0.5984636",
"0.57623315",
"0.5739988",
"0.5738215",
"0.56827587",
"0.56365216",
"0.5631884",
"0.55824167",
"0.5575974",
"0.5572623",
"0.5571467",
"0.5521887",
"0.5512704",
"0.5506832",
"0.5498103",
"0.5493591",
"0.5474122",
"0.5456768",
"0.5431809",
"0.5424916",
"0.53970873",
"0.5395401",
"0.53799164",
"0.53733027",
"0.53680944",
"0.53588015",
"0.5340805"
] |
0.67510563
|
0
|
Set the candidate of the challenge to the requester.
|
def form_valid(self, form):
    form.instance.candidate = self.candidate
    messages.success(self.request, 'Challenge requested!')
    return super(CandidatePortalView, self).form_valid(form)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def challenge(self, challenge):\n\n self._challenge = challenge",
"def candidate_id(self, candidate_id):\n\n self._candidate_id = candidate_id",
"def candidate_id(self, candidate_id):\n\n self._candidate_id = candidate_id",
"def challenge_response(self, challenge_response):\n\n self._challenge_response = challenge_response",
"def candidate_name(self, candidate_name):\n\n self._candidate_name = candidate_name",
"def candidate_name(self, candidate_name):\n\n self._candidate_name = candidate_name",
"def review_requested_by(self, review_requested_by):\n\n self._review_requested_by = review_requested_by",
"def tag_challenge(self, tag_challenge):\n\n self._tag_challenge = tag_challenge",
"def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card",
"def candidate_office(self, candidate_office):\n\n self._candidate_office = candidate_office",
"def candidate_first_name(self, candidate_first_name):\n\n self._candidate_first_name = candidate_first_name",
"def candidate_first_name(self, candidate_first_name):\n\n self._candidate_first_name = candidate_first_name",
"def candidate_prefix(self, candidate_prefix):\n\n self._candidate_prefix = candidate_prefix",
"def accept(self, responder):\n try:\n with transaction.atomic():\n self._apply_decision(self.Status.ACCEPTED, responder)\n # update the user credentials\n user = self.user\n user.is_credentialed = True\n user.credential_datetime = timezone.now()\n user.save()\n except DatabaseError:\n messages.error(request, 'Database error. Please try again.')",
"def accept(self, candidate):\n candidate_fitness = self.fitness(candidate)\n if candidate_fitness < self.cur_fitness:\n self.cur_fitness, self.cur_solution = candidate_fitness, candidate\n if candidate_fitness < self.best_fitness:\n self.best_fitness, self.best_solution = candidate_fitness, candidate\n else:\n if random.random() < self.p_accept(candidate_fitness):\n self.cur_fitness, self.cur_solution = candidate_fitness, candidate",
"def _update_challenge(request: PipelineRequest, challenger: \"PipelineResponse\") -> HttpChallenge:\n\n challenge = HttpChallenge(\n request.http_request.url,\n challenger.http_response.headers.get(\"WWW-Authenticate\"),\n response_headers=challenger.http_response.headers,\n )\n ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)\n return challenge",
"def set_candidate(self, roster):\n if len(self.candidate_list) < self.max_participants and len(roster) > 0:\n random.shuffle(roster)\n participant = roster.pop()\n self.candidate_list.append(participant)",
"def challenge_name(self, challenge_name):\n\n self._challenge_name = challenge_name",
"def request_answer_validation(self, answer, requested_by, validator):\n validation_request = ReferralAnswerValidationRequest.objects.create(\n validator=validator,\n answer=answer,\n )\n activity = ReferralActivity.objects.create(\n actor=requested_by,\n verb=ReferralActivityVerb.VALIDATION_REQUESTED,\n referral=self,\n item_content_object=validation_request,\n )\n Mailer.send_validation_requested(\n validation_request=validation_request,\n activity=activity,\n )",
"def test_candidate_update(self):\r\n self.register_user()\r\n result = self.login_user()\r\n access_token = json.loads(result.data.decode())['access_token']\r\n\r\n # first, we create a candidate by making a POST request\r\n rv = self.client().post('/candidate',headers=dict(Authorization=access_token),data=self.candidate)\r\n self.assertEqual(rv.status_code, 201)\r\n results = json.loads(rv.data.decode())\r\n\r\n #then, we edit the created candidate by making a PUT request\r\n rv = self.client().put('/candidate',headers=dict(Authorization=access_token),\r\n data={\"enrolement_no\":results['enrolement_no'],\r\n \"name\": \"Ali\", \"address\":\"Islamabad\", \"degree_name\":\"BSCS\"})\r\n self.assertEqual(rv.status_code, 200)\r\n\r\n # finally, we get the edited candidate to see if it is actually edited.\r\n results = self.client().get('/candidate',headers=dict(Authorization=access_token),data={'enrolement_no':results['enrolement_no']})\r\n self.assertEqual(result.status_code, 200)",
"def candidate(self):\n return Candidate(self.lan_address, self.tunnel)",
"def set_known_creator(self, target_item, creator_Q, reference):\n creator_item = self.wd.QtoItemPage(creator_Q)\n self.wd.addNewClaim(\n u'P170',\n WD.Statement(creator_item),\n target_item,\n reference)",
"def challenge_id(self, challenge_id):\n\n self._challenge_id = challenge_id",
"def challenge_id(self, challenge_id):\n\n self._challenge_id = challenge_id",
"def set_requester_registered(self, requester_id: str, val: bool) -> None:\n self.ensure_requester_exists(requester_id)\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n UPDATE requesters\n SET is_registered = ?\n WHERE requester_id = ?\n \"\"\",\n (val, requester_id),\n )\n conn.commit()\n return None",
"def set_picked_card(self, player_id: int, game_round_id: int, slack_user_hash: str, position: int,\n card: PlayerHandCardType):\n with self.eng.session_mgr() as session:\n # Move card to player_pick\n session.add(TablePlayerPick(\n player_key=player_id,\n game_round_key=game_round_id,\n slack_user_hash=slack_user_hash,\n card_order=position,\n answer_card_key=card.answer_card_key\n ))\n # Mark card in the hand as picked\n session.query(TablePlayerHand).filter(and_(\n TablePlayerHand.player_key == player_id,\n TablePlayerHand.hand_id == card.hand_id\n )).update({\n TablePlayerHand.is_picked: True\n })\n # Increment times picked\n session.query(TableAnswerCard).filter(\n TableAnswerCard.answer_card_id == card.answer_card_key\n ).update({\n TableAnswerCard.times_picked: TableAnswerCard.times_picked + 1\n })",
"def perform_set_score(responder, options):\n match = options['<match-id>']\n tla = options['<tla>']\n score = options['<score>']\n scores.set_match_score(match, tla, score)\n responder('Scored {0} points for {1} in match {2}'.format(score, tla, match))",
"def _set_request(self, request):\n self._request = request",
"async def accept_challenge(\n self, user_id, team, *, delay=0, lifespan=math.inf\n ):\n await self.upload_team(team)\n await self.use_command(\n \"\", \"accept\", user_id, delay=delay, lifespan=lifespan\n )",
"def _apply_decision(self, decision, responder):\n self.responder = responder\n self.status = decision\n self.decision_datetime = timezone.now()\n self.save()"
] |
[
"0.6499316",
"0.60932803",
"0.60932803",
"0.5781735",
"0.57699656",
"0.57699656",
"0.5516179",
"0.54925066",
"0.545744",
"0.54380524",
"0.5412529",
"0.5412529",
"0.5386207",
"0.52246284",
"0.5224182",
"0.5198079",
"0.5193528",
"0.51481795",
"0.5087636",
"0.50844556",
"0.5035526",
"0.5014091",
"0.50055784",
"0.50055784",
"0.49988306",
"0.49719766",
"0.49334478",
"0.4927394",
"0.4878043",
"0.48646906"
] |
0.61780286
|
1
|
Endpoint for updating a candidate's initiation status. The post parameters "candidate" and "initiated" specify the candidate (by Candidate pk) and their new initiation status, respectively.
|
def update_candidate_initiation_status(request):
    candidate_pk = request.POST.get('candidate')
    if not candidate_pk:
        return json_response(status=404)
    candidate = get_object_or_none(Candidate, pk=candidate_pk)
    initiated = json.loads(request.POST.get('initiated'))
    if not candidate or initiated is None:
        return json_response(status=400)
    candidate.initiated = initiated
    candidate.save(update_fields=['initiated'])
    # TODO(sjdemartini): Update LDAP-related information, like removal from
    # or addition to relevant groups.
    # TODO(sjdemartini): Update relevant mailing lists, moving initiated
    # candidates off of the candidates list and onto the members list.
    return json_response()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_candidate_update(self):\r\n self.register_user()\r\n result = self.login_user()\r\n access_token = json.loads(result.data.decode())['access_token']\r\n\r\n # first, we create a candidate by making a POST request\r\n rv = self.client().post('/candidate',headers=dict(Authorization=access_token),data=self.candidate)\r\n self.assertEqual(rv.status_code, 201)\r\n results = json.loads(rv.data.decode())\r\n\r\n #then, we edit the created candidate by making a PUT request\r\n rv = self.client().put('/candidate',headers=dict(Authorization=access_token),\r\n data={\"enrolement_no\":results['enrolement_no'],\r\n \"name\": \"Ali\", \"address\":\"Islamabad\", \"degree_name\":\"BSCS\"})\r\n self.assertEqual(rv.status_code, 200)\r\n\r\n # finally, we get the edited candidate to see if it is actually edited.\r\n results = self.client().get('/candidate',headers=dict(Authorization=access_token),data={'enrolement_no':results['enrolement_no']})\r\n self.assertEqual(result.status_code, 200)",
"def add_candidate_to_election(self, election_id: str, candidate_id: str) -> dict:",
"def startElection(self):\n self.role = 'candidate'\n self.leader_id = None\n self.resetElectionTimeout()\n self.current_term += 1\n self.votes = [self.datacenter_id]\n self.voted_for = self.datacenter_id\n dictobj = {'current_term': self.current_term, 'voted_for': self.voted_for, 'log': self.log}\n filename = \"./state\"+self.datacenter_id+'.pkl'\n fileobj = open(filename, 'wb')\n pickle.dump(dictobj, fileobj)\n fileobj.close()\n\n logging.debug('become candidate for term {}'.format(self.current_term))\n\n # handle the case where only one server is left\n if not self.isLeader() and self.enoughForLeader(self.votes):\n self.becomeLeader()\n\n # send RequestVote to all other servers\n # (index & term of last log entry)\n self.server.requestVote(self.current_term, self.getLatest()[0], \\\n self.getLatest()[1])",
"def form_valid(self, form):\n form.instance.candidate = self.candidate\n messages.success(self.request, 'Challenge requested!')\n return super(CandidatePortalView, self).form_valid(form)",
"def test_POST_candidate(self):\n self.init_elect_types()\n userA = models.User(\n name = \"UserA\",\n email = \"[email protected]\",\n password = \"asdf\")\n session.add(userA)\n session.commit()\n\n electionA = models.Election(\n title = \"Election A\",\n admin_id = userA.id,\n )\n session.add(electionA)\n session.commit()\n\n raceA = models.Race(\n title = \"Race A\",\n election_id = electionA.id\n )\n session.add(raceA)\n session.commit()\n\n data = {\n \"title\": \"Candidate A\",\n \"race_id\": raceA.id\n }\n\n response = self.client.post(\"/api/candidates\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(urlparse(response.headers.get(\"Location\")).path,\n \"/api/candidates/1\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"id\"], 1)\n self.assertEqual(data[\"title\"], \"Candidate A\")\n\n candidates = session.query(models.Candidate).all()\n self.assertEqual(len(candidates), 1)\n\n candidate = candidates[0]\n self.assertEqual(candidate.title, \"Candidate A\")",
"def create_candidate_vote(election_id, candidate_id): # noqa: E501\n try:\n election_id = Web3.toChecksumAddress(election_id)\n except ValueError:\n message = ('Election ID %s is not an Ethereum address' %\n election_id)\n error = Error(\n code=1,\n message=message\n )\n return error, 400\n contract = current_app.config['FACTORY_CONTRACT']\n if not contract.call('electionExist', [election_id]):\n message = (\"Election with id %s does not exist\" % election_id)\n error = Error(\n code=2,\n message=message\n )\n return error, 404\n provider = contract.provider\n election_abi = current_app.config['ELECTION_ABI']\n contract = Ethereum(provider, election_id, election_abi)\n candidates_number = contract.call('numCandidates')\n if candidate_id < 0 or candidate_id > candidates_number:\n message = (\"Candidate with id %s does not exist\" % candidate_id)\n error = Error(\n code=2,\n message=message\n )\n return error, 404\n if connexion.request.is_json:\n info = connexion.request.get_json()\n\n has_voted = contract.call('voted', [info['account']])\n if has_voted:\n error = Error(\n code=6,\n message='Already voted before'\n )\n return error, 401\n\n contract.send('vote', info['account'], info['pkey'], [candidate_id])",
"def candidate_id(self, candidate_id):\n\n self._candidate_id = candidate_id",
"def candidate_id(self, candidate_id):\n\n self._candidate_id = candidate_id",
"def augment_candidate_with_ultimate_election_date(candidate, elections_dict={}):\n candidate_ultimate_election_date = None\n candidate_year = None\n status = ''\n success = True\n values_changed = False\n\n if not candidate or not hasattr(candidate, 'candidate_ultimate_election_date'):\n status += \"CANDIDATE_MISSING \"\n success = False\n return {\n 'candidate': candidate,\n 'elections_dict': elections_dict,\n # 'latest_office_we_vote_id': latest_office_we_vote_id,\n 'success': success,\n 'status': status,\n 'values_changed': values_changed,\n }\n candidate_list_manager = CandidateListManager()\n results = candidate_list_manager.retrieve_candidate_to_office_link_list(\n candidate_we_vote_id_list=[candidate.we_vote_id],\n read_only=True)\n candidate_to_office_link_list = results['candidate_to_office_link_list']\n latest_election_date = 0\n # latest_office_we_vote_id = ''\n for candidate_to_office_link in candidate_to_office_link_list:\n try:\n if candidate_to_office_link.google_civic_election_id in elections_dict:\n this_election = elections_dict[candidate_to_office_link.google_civic_election_id]\n else:\n this_election = candidate_to_office_link.election()\n try:\n if positive_value_exists(this_election.google_civic_election_id) \\\n and this_election.google_civic_election_id not in elections_dict:\n elections_dict[this_election.google_civic_election_id] = this_election\n except Exception as e:\n status += \"COULD_NOT_ADD_ELECTION_TO_DICT: \" + str(e) + \" \"\n election_day_as_integer = convert_we_vote_date_string_to_date_as_integer(this_election.election_day_text)\n if election_day_as_integer > latest_election_date:\n candidate_ultimate_election_date = election_day_as_integer\n election_day_as_string = str(election_day_as_integer)\n year = election_day_as_string[:4]\n if year:\n candidate_year = convert_to_int(year)\n latest_election_date = election_day_as_integer\n # latest_office_we_vote_id = candidate_to_office_link.contest_office_we_vote_id\n except Exception as e:\n status += \"PROBLEM_GETTING_ELECTION_INFORMATION: \" + str(e) + \" \"\n\n # Now that we have cycled through all the candidate_to_office_link_list, augment the candidate\n if positive_value_exists(candidate_ultimate_election_date) \\\n and candidate_ultimate_election_date != candidate.candidate_ultimate_election_date:\n candidate.candidate_ultimate_election_date = candidate_ultimate_election_date\n values_changed = True\n if positive_value_exists(candidate_year) \\\n and candidate_year != candidate.candidate_year:\n candidate.candidate_year = candidate_year\n values_changed = True\n return {\n 'candidate': candidate,\n 'elections_dict': elections_dict,\n # 'latest_office_we_vote_id': latest_office_we_vote_id,\n 'success': success,\n 'status': status,\n 'values_changed': values_changed,\n }",
"def update_candidate_details_from_politician(candidate=None, politician=None):\n fields_updated = []\n status = ''\n success = True\n save_changes = False\n\n if not hasattr(candidate, 'supporters_count') or not hasattr(politician, 'supporters_count'):\n save_changes = False\n success = False\n status += 'UPDATE_CANDIDATE_FROM_POLITICIAN_MISSING_REQUIRED_ATTRIBUTES '\n results = {\n 'candidate': candidate,\n 'fields_updated': fields_updated,\n 'save_changes': save_changes,\n 'status': status,\n 'success': success,\n }\n return results\n\n try:\n if positive_value_exists(candidate.id):\n if not positive_value_exists(candidate.ballotpedia_candidate_name) and \\\n positive_value_exists(politician.ballotpedia_politician_name):\n candidate.ballotpedia_candidate_name = politician.ballotpedia_politician_name\n fields_updated.append('ballotpedia_candidate_name')\n save_changes = True\n if not positive_value_exists(candidate.ballotpedia_candidate_url) and \\\n positive_value_exists(politician.ballotpedia_politician_url):\n candidate.ballotpedia_candidate_url = politician.ballotpedia_politician_url\n fields_updated.append('ballotpedia_candidate_url')\n save_changes = True\n # For identically named fields - no existing value\n results = copy_field_value_from_object1_to_object2(\n object1=politician,\n object2=candidate,\n object1_field_name_list=[\n 'instagram_followers_count',\n 'instagram_handle',\n 'linkedin_url',\n 'photo_url_from_vote_usa',\n 'vote_usa_profile_image_url_https',\n 'wikipedia_url',\n 'youtube_url',\n ],\n only_change_object2_field_if_incoming_value=True,\n only_change_object2_field_if_no_existing_value=True)\n candidate = results['object2'] if results['success'] and results['values_changed'] else candidate\n save_changes = save_changes or results['values_changed']\n fields_updated_append = results['fields_updated']\n for new_field in fields_updated_append:\n if new_field not in fields_updated:\n fields_updated.append(new_field)\n # For identically named fields - lock together existing values\n results = copy_field_value_from_object1_to_object2(\n object1=politician,\n object2=candidate,\n object1_field_name_list=[\n 'ballot_guide_official_statement',\n ],\n only_change_object2_field_if_incoming_value=False,\n only_change_object2_field_if_no_existing_value=False)\n candidate = results['object2'] if results['success'] and results['values_changed'] else candidate\n save_changes = save_changes or results['values_changed']\n fields_updated_append = results['fields_updated']\n for new_field in fields_updated_append:\n if new_field not in fields_updated:\n fields_updated.append(new_field)\n\n if positive_value_exists(politician.politician_name):\n if positive_value_exists(candidate.candidate_name) \\\n and candidate.candidate_name != politician.politician_name:\n # Make sure current candidate_name is in the google_civic_candidate_name fields\n results = add_name_to_next_spot(candidate, candidate.candidate_name)\n if results['success'] and results['values_changed']:\n candidate = results['candidate_or_politician']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n elif not results['success']:\n status += \"FAILED_TO_ADD_CANDIDATE_NAME: \" + results['status']\n if candidate.candidate_name != politician.politician_name:\n candidate.candidate_name = politician.politician_name\n save_changes = True\n fields_updated.append('candidate_name')\n if positive_value_exists(politician.google_civic_candidate_name):\n results 
= add_name_to_next_spot(candidate, politician.google_civic_candidate_name)\n if results['success'] and results['values_changed']:\n candidate = results['candidate_or_politician']\n save_changes = True\n if positive_value_exists(results['field_updated']) \\\n and results['field_updated'] not in fields_updated:\n fields_updated.append(results['field_updated'])\n elif not results['success']:\n status += \"FAILED_TO_ADD_GOOGLE_CIVIC_CANDIDATE_NAME: \" + results['status']\n if positive_value_exists(politician.google_civic_candidate_name2):\n results = add_name_to_next_spot(candidate, politician.google_civic_candidate_name2)\n if results['success'] and results['values_changed']:\n candidate = results['candidate_or_politician']\n save_changes = True\n if positive_value_exists(results['field_updated']) \\\n and results['field_updated'] not in fields_updated:\n fields_updated.append(results['field_updated'])\n elif not results['success']:\n status += \"FAILED_TO_ADD_GOOGLE_CIVIC_CANDIDATE_NAME2: \" + results['status']\n if positive_value_exists(politician.google_civic_candidate_name3):\n results = add_name_to_next_spot(candidate, politician.google_civic_candidate_name3)\n if results['success'] and results['values_changed']:\n candidate = results['candidate_or_politician']\n save_changes = True\n if positive_value_exists(results['field_updated']) \\\n and results['field_updated'] not in fields_updated:\n fields_updated.append(results['field_updated'])\n elif not results['success']:\n status += \"FAILED_TO_ADD_GOOGLE_CIVIC_CANDIDATE_NAME3: \" + results['status']\n # Facebook\n if positive_value_exists(politician.facebook_url) and not politician.facebook_url_is_broken:\n candidate.facebook_url = politician.facebook_url\n save_changes = True\n if 'facebook_url' not in fields_updated:\n fields_updated.append('facebook_url')\n elif positive_value_exists(politician.facebook_url2) and not politician.facebook_url2_is_broken:\n candidate.facebook_url = politician.facebook_url2\n save_changes = True\n if 'facebook_url' not in fields_updated:\n fields_updated.append('facebook_url')\n elif positive_value_exists(politician.facebook_url3) and not politician.facebook_url3_is_broken:\n candidate.facebook_url = politician.facebook_url3\n save_changes = True\n if 'facebook_url' not in fields_updated:\n fields_updated.append('facebook_url')\n # Email\n if positive_value_exists(politician.politician_email):\n candidate.candidate_email = politician.politician_email\n save_changes = True\n if 'candidate_email' not in fields_updated:\n fields_updated.append('candidate_email')\n elif positive_value_exists(politician.politician_email2):\n candidate.candidate_email = politician.politician_email2\n save_changes = True\n if 'candidate_email' not in fields_updated:\n fields_updated.append('candidate_email')\n elif positive_value_exists(politician.politician_email3):\n candidate.candidate_email = politician.politician_email3\n save_changes = True\n if 'candidate_email' not in fields_updated:\n fields_updated.append('candidate_email')\n # Phone\n if positive_value_exists(politician.politician_phone_number):\n candidate.candidate_phone = politician.politician_phone_number\n save_changes = True\n if 'candidate_phone' not in fields_updated:\n fields_updated.append('candidate_phone')\n elif positive_value_exists(politician.politician_phone_number2):\n candidate.candidate_phone = politician.politician_phone_number2\n save_changes = True\n if 'candidate_phone' not in fields_updated:\n fields_updated.append('candidate_phone')\n elif 
positive_value_exists(politician.politician_phone_number3):\n candidate.candidate_phone = politician.politician_phone_number3\n save_changes = True\n if 'candidate_phone' not in fields_updated:\n fields_updated.append('candidate_phone')\n # Twitter Handle\n if positive_value_exists(politician.politician_twitter_handle):\n results = add_twitter_handle_to_next_candidate_spot(\n candidate, politician.politician_twitter_handle)\n if results['success'] and results['values_changed']:\n candidate = results['candidate']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n if positive_value_exists(politician.politician_twitter_handle2):\n results = add_twitter_handle_to_next_candidate_spot(\n candidate, politician.politician_twitter_handle2)\n if results['success'] and results['values_changed']:\n candidate = results['candidate']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n if positive_value_exists(politician.politician_twitter_handle3):\n results = add_twitter_handle_to_next_candidate_spot(\n candidate, politician.politician_twitter_handle3)\n if results['success'] and results['values_changed']:\n candidate = results['candidate']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n if positive_value_exists(politician.politician_twitter_handle4):\n results = add_twitter_handle_to_next_candidate_spot(\n candidate, politician.politician_twitter_handle4)\n if results['success'] and results['values_changed']:\n candidate = results['candidate']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n if positive_value_exists(politician.politician_twitter_handle5):\n results = add_twitter_handle_to_next_candidate_spot(\n candidate, politician.politician_twitter_handle5)\n if results['success'] and results['values_changed']:\n candidate = results['candidate']\n save_changes = True\n if positive_value_exists(results['field_updated']):\n fields_updated.append(results['field_updated'])\n # Contact Form URL\n if not positive_value_exists(candidate.candidate_contact_form_url) and \\\n positive_value_exists(politician.politician_contact_form_url):\n candidate.candidate_contact_form_url = politician.politician_contact_form_url\n fields_updated.append('candidate_contact_form_url')\n save_changes = True\n # URL\n if positive_value_exists(politician.politician_url):\n candidate.candidate_url = politician.politician_url\n fields_updated.append('candidate_url')\n save_changes = True\n elif positive_value_exists(politician.politician_url2):\n candidate.candidate_url = politician.politician_url2\n fields_updated.append('candidate_url')\n save_changes = True\n elif positive_value_exists(politician.politician_url3):\n candidate.candidate_url = politician.politician_url3\n fields_updated.append('candidate_url')\n save_changes = True\n elif positive_value_exists(politician.politician_url4):\n candidate.candidate_url = politician.politician_url4\n fields_updated.append('candidate_url')\n save_changes = True\n elif positive_value_exists(politician.politician_url5):\n candidate.candidate_url = politician.politician_url5\n fields_updated.append('candidate_url')\n save_changes = True\n if positive_value_exists(politician.vote_usa_politician_id):\n candidate.vote_usa_politician_id = politician.vote_usa_politician_id\n 
fields_updated.append('vote_usa_politician_id')\n save_changes = True\n # Photos\n if candidate.profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UNKNOWN:\n if positive_value_exists(politician.profile_image_type_currently_active) \\\n and politician.profile_image_type_currently_active != PROFILE_IMAGE_TYPE_UNKNOWN:\n candidate.profile_image_type_currently_active = politician.profile_image_type_currently_active\n save_changes = True\n if 'profile_image_type_currently_active' not in fields_updated:\n fields_updated.append('profile_image_type_currently_active')\n results = copy_field_value_from_object1_to_object2(\n object1=politician,\n object2=candidate,\n object1_field_name_list=[\n 'we_vote_hosted_profile_facebook_image_url_large',\n 'we_vote_hosted_profile_facebook_image_url_medium',\n 'we_vote_hosted_profile_facebook_image_url_tiny',\n 'we_vote_hosted_profile_twitter_image_url_large',\n 'we_vote_hosted_profile_twitter_image_url_medium',\n 'we_vote_hosted_profile_twitter_image_url_tiny',\n 'we_vote_hosted_profile_uploaded_image_url_large',\n 'we_vote_hosted_profile_uploaded_image_url_medium',\n 'we_vote_hosted_profile_uploaded_image_url_tiny',\n 'we_vote_hosted_profile_vote_usa_image_url_large',\n 'we_vote_hosted_profile_vote_usa_image_url_medium',\n 'we_vote_hosted_profile_vote_usa_image_url_tiny',\n ],\n only_change_object2_field_if_incoming_value=True,\n only_change_object2_field_if_no_existing_value=True)\n candidate = results['object2'] if results['success'] and results['values_changed'] else candidate\n save_changes = save_changes or results['values_changed']\n fields_updated_append = results['fields_updated']\n for new_field in fields_updated_append:\n if new_field not in fields_updated:\n fields_updated.append(new_field)\n\n profile_image_default_updated = False\n from image.controllers import organize_object_photo_fields_based_on_image_type_currently_active\n results = organize_object_photo_fields_based_on_image_type_currently_active(\n object_with_photo_fields=candidate,\n profile_image_type_currently_active=candidate.profile_image_type_currently_active,\n )\n if results['success']:\n candidate = results['object_with_photo_fields']\n profile_image_default_updated = results['profile_image_default_updated']\n save_changes = save_changes or results['values_changed']\n\n if profile_image_default_updated:\n if 'profile_image_type_currently_active' not in fields_updated:\n fields_updated.append('profile_image_type_currently_active')\n if 'we_vote_hosted_profile_image_url_large' not in fields_updated:\n fields_updated.append('we_vote_hosted_profile_image_url_large')\n if 'we_vote_hosted_profile_image_url_medium' not in fields_updated:\n fields_updated.append('we_vote_hosted_profile_image_url_medium')\n if 'we_vote_hosted_profile_image_url_tiny' not in fields_updated:\n fields_updated.append('we_vote_hosted_profile_image_url_tiny')\n\n # Other\n # if not positive_value_exists(candidate.wikipedia_url) and \\\n # positive_value_exists(politician.wikipedia_url):\n # candidate.wikipedia_url = politician.wikipedia_url\n # fields_updated.append('wikipedia_url')\n # save_changes = True\n except Exception as e:\n status += 'FAILED_TO_UPDATE_CANDIDATE: ' \\\n '{error} [type: {error_type}]'.format(error=e, error_type=type(e))\n success = False\n\n results = {\n 'candidate': candidate,\n 'fields_updated': fields_updated,\n 'save_changes': save_changes,\n 'status': status,\n 'success': success,\n }\n return results",
"def augment_candidate_with_contest_office_data(candidate, office):\n status = ''\n success = True\n values_changed = False\n\n error_results = {\n 'candidate': candidate,\n 'success': success,\n 'status': status,\n 'values_changed': values_changed,\n }\n\n if not candidate or not hasattr(candidate, 'contest_office_name'):\n status += \"CANDIDATE_MISSING \"\n error_results['status'] = status\n error_results['success'] = False\n return error_results\n\n if not office or not hasattr(office, 'google_civic_election_id'):\n status += \"OFFICE_MISSING \"\n error_results['status'] = status\n error_results['success'] = False\n return error_results\n\n if positive_value_exists(office.office_name) \\\n and office.office_name != candidate.contest_office_name:\n candidate.contest_office_name = office.office_name\n values_changed = True\n if positive_value_exists(office.district_name) \\\n and office.district_name != candidate.district_name:\n candidate.district_name = office.district_name\n values_changed = True\n return {\n 'candidate': candidate,\n 'success': success,\n 'status': status,\n 'values_changed': values_changed,\n }",
"def candidate_office_state(self, candidate_office_state):\n\n self._candidate_office_state = candidate_office_state",
"def test_edition_of_aid_status(client, contributor):\n\n aid = AidFactory(status='draft', author=contributor)\n client.force_login(contributor)\n\n update_status_url = reverse('aid_status_update_view', args=[aid.slug])\n res = client.get(update_status_url)\n assert res.status_code == 405 # Method not allowed, only post\n\n res = client.post(update_status_url, {'current_status': 'draft'})\n aid.refresh_from_db()\n assert res.status_code == 302\n assert aid.status == 'reviewable'\n\n res = client.post(update_status_url, {'current_status': 'reviewable'})\n aid.refresh_from_db()\n assert res.status_code == 302\n assert aid.status == 'draft'\n\n aid.status = 'published'\n aid.save()\n\n res = client.post(update_status_url, {'current_status': 'published'})\n aid.refresh_from_db()\n assert res.status_code == 302\n assert aid.status == 'draft'",
"def test_candidate_creation(self):\r\n self.register_user()\r\n result = self.login_user()\r\n access_token = json.loads(result.data.decode())['access_token']\r\n\r\n # create a candidate by making a POST request\r\n res = self.client().post('/candidate',headers=dict(Authorization=access_token),data=self.candidate)\r\n self.assertEqual(res.status_code, 201)",
"def test_update(self):\n optimizer = \"RandomSearch\"\n name = \"test_init_experiment\"\n param_defs = {\n \"x\": MinMaxNumericParamDef(0, 1),\n \"name\": NominalParamDef([\"A\", \"B\", \"C\"])\n }\n minimization = True\n\n LAss = PrettyLabAssistant()\n LAss.init_experiment(name, optimizer, param_defs, minimization=minimization)\n cand = LAss.get_next_candidate(name)\n cand.result = 1\n LAss.update(name, cand)\n assert_items_equal(LAss.exp_assistants[name].experiment.candidates_finished, [cand])\n assert_equal(LAss.exp_assistants[name].experiment.candidates_finished[0].result, 1)",
"def test_decision_maker_handle_state_update_initialize(self):\n good_holdings = {\"good_id\": 2}\n currency_holdings = {\"FET\": 100}\n utility_params = {\"good_id\": 20.0}\n exchange_params = {\"FET\": 10.0}\n tx_fee = 1\n state_update_message = StateUpdateMessage(\n performative=StateUpdateMessage.Performative.INITIALIZE,\n amount_by_currency_id=currency_holdings,\n quantities_by_good_id=good_holdings,\n exchange_params_by_currency_id=exchange_params,\n utility_params_by_good_id=utility_params,\n tx_fee=tx_fee,\n )\n self.decision_maker.handle(state_update_message)\n assert self.decision_maker.ownership_state.amount_by_currency_id is not None\n assert self.decision_maker.ownership_state.quantities_by_good_id is not None\n assert (\n self.decision_maker.preferences.exchange_params_by_currency_id is not None\n )\n assert self.decision_maker.preferences.utility_params_by_good_id is not None",
"def update_aid_status(self):\n aid = self.object\n\n # Check that submitted form data is still consistent\n current_status = self.request.POST.get('current_status', None)\n if aid.status != current_status:\n return\n\n STATES = AidWorkflow.states\n if aid.status == STATES.draft:\n aid.submit()\n elif aid.status in (STATES.reviewable, STATES.published):\n aid.unpublish()\n log_admins.delay(\n 'Aide dépubliée',\n 'Une aide vient d\\'être dépubliée.\\n\\n{}'.format(aid),\n aid.get_absolute_url())\n\n msg = _('We updated your aid status.')\n messages.success(self.request, msg)",
"def __init__(\n self,\n initial_candidate: Genome,\n genome_factory: AbstractGenomeFactory,\n convergence_criterion: AbstractConvergenceCriterion):\n self._candidate = initial_candidate\n self._genome_factory = genome_factory\n self._convergence_criterion = convergence_criterion\n self._converged = False",
"def test_get_candidate(self):\n self.populate_database()\n response = self.client.get(\"/api/candidates/{}\".format(self.candidateBB.id),\n headers=[(\"Accept\", \"application/json\")])\n longURL_response = self.client.get(\n \"/api/races/{}/candidates/{}\".format(\n self.raceB.id, self.candidateBB.id),\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(longURL_response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(longURL_response.mimetype, \"application/json\")\n\n candidate = json.loads(response.data.decode(\"ascii\"))\n candidate_long = json.loads(longURL_response.data.decode(\"ascii\"))\n self.assertEqual(candidate[\"title\"], \"Candidate BB\")\n self.assertEqual(candidate[\"race_id\"], self.candidateBB.race_id)\n self.assertEqual(candidate_long[\"race_id\"], self.candidateBB.race_id)",
"def handleRequestVote(self, candidate_id, candidate_term,\n candidate_log_term, candidate_log_index):\n if candidate_id not in self.getAllCenterID():\n logging.warning('{} requested vote, but he is not in current config'\n .format(candidate_id))\n return\n if candidate_term < self.current_term:\n self.server.requestVoteReply(\n candidate_id, self.current_term, False)\n return\n if candidate_term > self.current_term:\n self.current_term = candidate_term\n self.current_term = max(candidate_term, self.current_term)\n grant_vote = (not self.voted_for or self.voted_for == candidate_id) and\\\n candidate_log_index >= self.getLatest()[1]\n if grant_vote:\n self.stepDown()\n self.voted_for = candidate_id\n logging.debug('voted for DC-{} in term {}'\n .format(candidate_id, self.current_term))\n dictobj = {'current_term': self.current_term, 'voted_for': self.voted_for, 'log': self.log}\n filename = \"./state\"+self.datacenter_id+'.pkl'\n fileobj = open(filename, 'wb')\n pickle.dump(dictobj, fileobj)\n fileobj.close()\n self.server.requestVoteReply(\n candidate_id, self.current_term, grant_vote)",
"def _update_candidate_range(self):\n for r in self.revisions:\n if r.tested:\n if r.good:\n self.lkgr = r\n elif r.bad:\n self.fkbr = r\n break\n assert self.lkgr and self.fkbr",
"def make_conforming_candidate_and_get_penalty(\n self,\n candidate: object\n ) -> tuple:\n pass",
"def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()",
"def candidate_retrieve_for_api(candidate_id, candidate_we_vote_id): # candidateRetrieve\n # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers\n # a ballot data lookup from Google Civic, like voterBallotItems does\n\n if not positive_value_exists(candidate_id) and not positive_value_exists(candidate_we_vote_id):\n status = 'VALID_CANDIDATE_ID_AND_CANDIDATE_WE_VOTE_ID_MISSING'\n json_data = {\n 'status': status,\n 'success': False,\n 'kind_of_ballot_item': CANDIDATE,\n 'id': candidate_id,\n 'we_vote_id': candidate_we_vote_id,\n 'google_civic_election_id': 0,\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n candidate_manager = CandidateManager()\n if positive_value_exists(candidate_id):\n results = candidate_manager.retrieve_candidate_from_id(candidate_id, read_only=True)\n success = results['success']\n status = results['status']\n elif positive_value_exists(candidate_we_vote_id):\n results = candidate_manager.retrieve_candidate_from_we_vote_id(candidate_we_vote_id, read_only=True)\n success = results['success']\n status = results['status']\n else:\n status = 'VALID_CANDIDATE_ID_AND_CANDIDATE_WE_VOTE_ID_MISSING_2' # It should be impossible to reach this\n json_data = {\n 'status': status,\n 'success': False,\n 'kind_of_ballot_item': CANDIDATE,\n 'id': candidate_id,\n 'we_vote_id': candidate_we_vote_id,\n 'google_civic_election_id': 0,\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n if success:\n candidate = results['candidate']\n results = generate_candidate_dict_from_candidate_object(\n candidate=candidate)\n candidate_dict = results['candidate_dict']\n json_data = candidate_dict\n json_data['status'] = status\n json_data['success'] = success\n else:\n json_data = {\n 'status': status,\n 'success': False,\n 'kind_of_ballot_item': CANDIDATE,\n 'id': candidate_id,\n 'we_vote_id': candidate_we_vote_id,\n 'google_civic_election_id': 0,\n }\n\n return HttpResponse(json.dumps(json_data), content_type='application/json')",
"def test_candidate(self):\n url = reverse('candidates-list')\n print(url, type(url))\n response = self.client.get(url, format='json')\n assert response.status_code == status.HTTP_200_OK\n assert Candidate.objects.count()== len(response.json())",
"def _update_candidate_and_parameters_when_candidate_within_trustregion(\n x_candidate,\n main_model,\n hessian_info,\n lambdas,\n newton_step,\n stopping_criteria,\n converged,\n):\n n = len(x_candidate)\n\n s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular)\n step_len = _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min)\n\n quadratic_term = x_candidate.T @ hessian_info.hessian_plus_lambda @ x_candidate\n\n relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambdas.candidate)\n if relative_error <= stopping_criteria[\"k_hard\"]:\n x_candidate = x_candidate + step_len * z_min\n converged = True\n\n lambda_new_lower_bound = max(lambdas.lower_bound, lambdas.candidate - s_min**2)\n\n hessian_plus_lambda = main_model.square_terms + newton_step * np.eye(n)\n _, factorization_unsuccessful = compute_cholesky_factorization(\n hessian_plus_lambda,\n lower=False,\n overwrite_a=False,\n clean=True,\n )\n\n if factorization_unsuccessful == 0:\n hessian_already_factorized = True\n lambda_new_candidate = newton_step\n else:\n hessian_already_factorized = hessian_info.already_factorized\n lambda_new_lower_bound = max(lambda_new_lower_bound, newton_step)\n lambda_new_candidate = _get_new_lambda_candidate(\n lower_bound=lambda_new_lower_bound, upper_bound=lambdas.candidate\n )\n\n hessian_info_new = hessian_info._replace(\n hessian_plus_lambda=hessian_plus_lambda,\n already_factorized=hessian_already_factorized,\n )\n\n lambdas_new = lambdas._replace(\n candidate=lambda_new_candidate,\n lower_bound=lambda_new_lower_bound,\n upper_bound=lambdas.candidate,\n )\n\n return x_candidate, hessian_info_new, lambdas_new, converged",
"def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()",
"def update_decision(request, sub_id):\n submission = get_object_or_404(Submission, id=sub_id)\n validate_chair_access(request.user, submission.conference)\n stage, _ = ReviewStage.objects.get_or_create(\n submission=submission,\n num_reviews_required=(\n submission.stype.num_reviews if submission.stype else 0),\n locked=False)\n decision = stage.decision\n form = UpdateReviewDecisionForm(request.POST, instance=decision)\n if form.is_valid():\n form.save()\n return JsonResponse(status=200, data={})\n return JsonResponse(status=500, data={'errors': form.errors})",
"def create_candidate_list():\n candidate_ids = request.vars['candidate_ids[]'] or {}\n\n candidate_ids = [int(candidate_id) for candidate_id in candidate_ids]\n if not len(candidate_ids):\n # no candidate_ids\n return response.json({'error': 1})\n json_candidate_ids = json.dumps(candidate_ids)\n\n m = hashlib.md5()\n m.update(json_candidate_ids + str(auth.user.id))\n\n hash = m.hexdigest()\n\n candidate_list_id = db.public_candidate_sharing.insert(userId=auth.user.id, candidateIdList=json_candidate_ids,\n hashKey=hash)\n\n return response.json({'url': URL('talent', 'candidate_list', args=[candidate_list_id, hash], host=True)})",
"def initiate(self):\n self._load_parameters()\n self._initiate_region_dict()\n self._initiate_parameter_dict()\n self.initiated = True"
] |
[
"0.58220094",
"0.5303385",
"0.5094234",
"0.5007821",
"0.4941206",
"0.49129364",
"0.48752788",
"0.48752788",
"0.48721525",
"0.48375428",
"0.47506183",
"0.46613085",
"0.46536946",
"0.46478763",
"0.46375787",
"0.45163202",
"0.4498283",
"0.44909114",
"0.44743252",
"0.44633308",
"0.44313896",
"0.43723106",
"0.4309025",
"0.43090132",
"0.42951208",
"0.4278678",
"0.42738065",
"0.4270254",
"0.4226349",
"0.4218509"
] |
0.87281287
|
0
|
Sets the division of this Game.
|
def division(self, division):
    self._division = division
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def division(self, division):\n \n self._division = division",
"def set_divide(self, a_divide):\n self.set_parameter('divide', a_divide)\n return self",
"def division(self):\n return self._division",
"def set_divisions(self, nx=1, ny=1):\n\n self.nx = nx\n self.ny = ny",
"def __div__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Divide, value)\n return out",
"def set_ratio(self, value):\n scene = self.scenes[self.current_scene]\n scene.set_perspective(ratio=value)\n self.redraw()",
"def setSplit(self,split):\n self.split=split",
"def _divide(self, denominator):\n self.precision = (self.precision / float(denominator))\n self.accuracy = (self.accuracy / float(denominator))\n self.recall = (self.recall / float(denominator))\n self.f_measure = (self.f_measure / float(denominator))",
"def divideAll(self, divisor):\n divisor = float(divisor)\n for key in self:\n self[key] /= divisor",
"def divideAll(self, divisor):\n divisor = float(divisor)\n for key in self:\n self[key] /= divisor",
"def __ifloordiv__(self, d_value: float):\n self.set_value(self.get_value() // d_value)\n return self",
"def set_ratio(self, ratio: tuple) -> None:\r\n self.ratio = ratio",
"def dividir(self):\n self.resultado = self.valor_1 / self.valor_2",
"def denominator(self, denominator):\n\n self._denominator = denominator",
"def div(self, first, second):\n try:\n if isinstance(second, str):\n second = self._variables[second]\n first_num = self._variables[first]\n self._variables[first] = int(first_num / second)\n except:\n print(f\"Could not divide {first} / {second}\")",
"def __init__(self, screen_size, grid_size):\n super(MainScreen, self).__init__(screen_size)\n self.gamegrid = QuadraticGrid(grid_size[0], grid_size[1])\n self.grid_width = grid_size[0]\n self.grid_height = grid_size[1]\n self.block_width = screen_size[0] / grid_size[0]\n self.block_height = screen_size[1] / grid_size[1]\n print str(self.block_width) + \" \" + str(self.block_height)\n \n self.game_model = GameModel(grid_size)\n self.dragon_group = pygame.sprite.Group()\n self.gun_group = pygame.sprite.Group()\n self.hat_group = pygame.sprite.Group()",
"def __div__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x / other, self._vector.y / other, self._vector.z / other))\n return self",
"def format_division(self, data):\n return data",
"def set_assignment_game_frame(self, assignment):\n\n self.frames[\"game\"].set_assignment(assignment)",
"def dividend(self, dividend):\n\n self._dividend = dividend",
"def SetContext(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetContext(self, *args)",
"def change_th_divs(self):\r\n self.theta_div = self.edit_th_divs.value()",
"def __init__(self, numerator, denominator=1):\n gcd1 = math.gcd(numerator, denominator)\n\n if denominator < 0:\n self.numerator = -(int(numerator/gcd1))\n self.denominator = abs(int(denominator/gcd1))\n elif denominator == 0:\n self.numerator = 1\n self.denominator = 0\n else:\n self.numerator = int(numerator/gcd1)\n self.denominator = int(denominator/gcd1)",
"def setFloor(self, x, y):\n\t\tself.setValue(x, y, self.floor_char)",
"def GetDivisions(self):\n ...",
"def SetBoundaryCriterion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivideContinuity_SetBoundaryCriterion(self, *args)",
"def divide(self):\n return self._do_calc(self.divider)",
"def divide(self):\n return self._do_calc(self.divider)",
"def divide(self):\n return self._do_calc(self.divider)",
"def _divide(self, denominator):\n self.explained_variance = (self.explained_variance / float(denominator))\n self.mean_absolute_error = (self.mean_absolute_error / float(denominator))\n self.mean_squared_error = (self.mean_squared_error / float(denominator))\n self.r2 = (self.r2 / float(denominator))\n self.root_mean_squared_error = (self.root_mean_squared_error / float(denominator))"
] |
[
"0.79271126",
"0.6628609",
"0.6287356",
"0.62198645",
"0.56096643",
"0.55676526",
"0.5439452",
"0.53641415",
"0.531078",
"0.531078",
"0.5296111",
"0.52825195",
"0.52011275",
"0.519674",
"0.5180101",
"0.51800334",
"0.5175467",
"0.5129107",
"0.5086838",
"0.5081677",
"0.50641596",
"0.49582183",
"0.49545905",
"0.49383366",
"0.4935134",
"0.49278513",
"0.49110287",
"0.49110287",
"0.49110287",
"0.49105957"
] |
0.7836024
|
1
|
Sets the round of this Game.
|
def round(self, round):
    self._round = round
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def new_round(self, round_number: int):\n self.turn = 0\n self.round = round_number",
"def set_round_game(self, round_game):\r\n if not round_game:\r\n return self.default_round_game\r\n return round_game",
"def update_visual_round(self, round):\n self.text_round.config(text=round)",
"def setRoundingRadius( self, radius ):\n self._roundingRadius = radius\n self.setDirty()",
"def SetRoundShape(self):\n w, h = self.GetSize()\n self.SetShape(GetRoundShape(w, h, 10))",
"def set_num_rounds(cls, new_num_rounds):\n raise NotImplementedError(\"subclasses need to override this method\")",
"def set_time_in_round(time: int):\n store.round_time = time",
"def rounds(self, rounds: List[Round]):\n\n self._rounds = rounds",
"def setR(self, radius):\n self.radius = radius",
"def _increment_round_number():\n store.round += 1",
"def simulate(self):\n self.round += 1",
"def newRound():\r\n pass",
"def makeRoundOver(self):\n self.isRoundOver = True\n self.refresh()",
"def set_radius(self, radius):\n self._radius = radius\n self._reset_slot_bounds()",
"def roundParameters(self, roundNum):\n ruleName = random.choice(self.state.ruleNames)\n return SupportedRules[ruleName].makeRoundParameters(self, roundNum)",
"def set_radius(self, radius):\n self.__begin.set_radius(radius)\n self.__end.set_radius(radius)",
"def SetAnalysisRound(self):\r\n if self.FilepathSwitchBox.currentText() == 'Tag':\r\n self.Tag_round_infor.append(self.AnalysisRoundBox.value())\r\n elif self.FilepathSwitchBox.currentText() == 'Lib':\r\n self.Lib_round_infor.append(self.AnalysisRoundBox.value())\r\n \r\n self.normalOutputWritten('Tag_round_infor: {}\\nLib_round_infor: {}\\n'.format(str(self.Tag_round_infor), str(self.Lib_round_infor)))",
"def play_round(self):\n print('='*10) # Round separation display\n print(f'Round {self.round_num}:')\n for player in self.players:\n\n # Player separation display:\n if player != self.players[0]:\n print('-' * 5)\n\n self.play_turn(player)\n \n # Return if exit conditions are met\n if (self.exit_flag) or (self.winner is not None) or (self.board.full()):\n return\n self.round_num += 1",
"def start_round(self, round_counter):\n cxnlib.CXNNetStartRound(self.handle, round_counter)",
"def round(self):\n return self._round",
"def set_radius(self, radius, unit='km'):\n self.radius = radius\n self.unit = unit",
"def increment_round(self, try_again):\r\n\r\n if try_again is False:\r\n\r\n # Increments the counter\r\n self.round_counter += 1\r\n round_value = \"PLAYER: {}\".format(self.player_wins) + (\" \" * 62) + \"ROUND {}\".format(\r\n self.round_counter) + (\" \" * 62) +\"OPPONENT: {}\".format(self.opp_wins)\r\n\r\n # Clear the round entry box\r\n self.round_entry.delete(0, \"end\")\r\n\r\n # Insert the new value to display the win/lose record\r\n self.round_entry.insert(0, round_value)",
"def play_round(self) -> None:\r\n # Print round details:\r\n self.open_cards()\r\n print(Messages.OPEN_CARDS)\r\n print(self.card_stack)\r\n print(Messages.MONEY_IN_STACK + \" \", end=\"\")\r\n print(self.money_stack)\r\n\r\n start_player = self.cur_player # Helper for the round to end correctly.\r\n\r\n # The actual round\r\n while self.continue_round(start_player):\r\n if self.active_players == 1:\r\n break\r\n self.round_player_money += self.players[self.cur_player].play_round(self.round_player_money)\r\n if not self.players[self.cur_player].is_active():\r\n self.active_players -= 1\r\n self.next_active_player()\r\n self.players[start_player].set_already_raised(True) # Helper for the round to end correctly.\r\n self.end_round()",
"def newRound(self, winningTeamNumber):\n self.counter[\"Team1\"] = 0\n self.counter[\"Team2\"] = 0\n self.wonRounds[\"Team{}\".format(winningTeamNumber)] += 1\n self.sidesChanged = not self.sidesChanged\n self.updateModel()\n if (self.isGameOver()):\n self.newGame(winningTeamNumber)",
"def round(self):\n #player turn\n if self.started:\n self.started = False #registers the game as started then immediately turns that value false\n if self.initial_action:\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n else: \n if self.apply_policy():\n card = self.deck.deal()\n self.player.value += card.value\n if card.is_ace:\n self.player.usable_ace = True\n else:\n self.player.playing = False\n\n #dealer turn\n if self.dealer.value < 17:\n card = self.deck.deal()\n self.dealer.value += card.value\n self.dealer.visible_value += card.value\n #allow people to reduce their scores by applying aces\n self.apply_ace()\n #check to see if anyone has bust by making bust people not _playing\n if self.player.value > 21:\n self.player.broke = True\n self.player.playing = False\n if self.dealer.value > 21:\n self.dealer.broke = True",
"def setUpNextGameRound(self):\n\t\tself.pots = [0]\n\t\tself.currentBet = [0]\n\t\tself.reinitDeck()\n\t\tself.communityCards = []\n\t\tallPlayers = self.getPlayers()\n\t\tself.resetPlayerHands(allPlayers)\n\t\tself.resetPlayerBetAmount(allPlayers)\n\t\t_, seat = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\tself.curDealerSeatNo = seat\n\t\tself.beginRound()",
"def naked_round(self):\n self.change = False\n for row in range(self.board_size):\n for col in range(self.board_size):\n if len(self.possibles[row][col]) == 1:\n num = self.possibles[row][col].pop()\n self.set_number(num, row, col, \"NS\")",
"def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)",
"def run_round(self):\n self._running = True\n self._run_round()\n self._running = False",
"def rounding_decimal(self, rounding_decimal):\n\n self._rounding_decimal = rounding_decimal"
] |
[
"0.75033015",
"0.7253406",
"0.66619974",
"0.65597403",
"0.6454907",
"0.62289166",
"0.61594796",
"0.60673624",
"0.60668546",
"0.596048",
"0.59210134",
"0.59192115",
"0.5855568",
"0.5756124",
"0.57440203",
"0.5726209",
"0.56967396",
"0.5692984",
"0.5583253",
"0.55227274",
"0.5520747",
"0.5462527",
"0.5412759",
"0.5410773",
"0.5358155",
"0.5341191",
"0.53343713",
"0.52728486",
"0.52696353",
"0.52539665"
] |
0.78125286
|
0
|
Sets the player_a_name of this Game.
|
def player_a_name(self, player_a_name):
    self._player_a_name = player_a_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name",
"def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id",
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def set_name(self, a_name):\n self.set_parameter('name', a_name)\n return self",
"def setNewPlayerName(self, playerName):\n if(self.__playerName==\"???\" and not(self.__playerAlreadyExists(playerName)) ):\n #verify that there isn't already a player by this name\n fileLocs=FileLocations()\n filename = fileLocs.playerProfiles+r\"\\profiles.p\"\n playerList = []\n index = -1\n f = None\n try:\n #load list of players -- if there's no file, then we skip this step \n f = open(filename, \"r\")\n playerList = pickle.load(f)\n f.close() \n except IOError: \n pass\n finally:\n #add name to list\n self.__playerName=playerName\n playerList.append(playerName)\n try:\n f = open(filename,\"w\")\n pickle.dump(playerList, f)\n f.close()\n self.savePlayerInfo()\n except IOError:\n raise PlayerIOError(\"Unable to add player name to profile.p\")\n return True\n else:\n return False ##unsuccessful -- return false",
"def setExistingPlayerName(self, playerName):\n if self.__playerAlreadyExists(playerName):\n self.__playerName = playerName\n return True\n else:\n return False",
"def set_player(self, new_player):\n self.player = new_player",
"def update_players_name(self, player_1, player_2):\n self.model.player_1 = player_1\n self.model.player_2 = player_2\n self.logger.info(\"User_1 has name %s, user_2 has name %s\", player_1, player_2)",
"def get_player_name(self):\n if self.name_entered is False:\n self.name = self.input_name(\"Please enter your name:\")\n self.name_entered = True\n self.score_file()",
"def __updatePlayerName(db, player):\n c = db.cursor()\n id = player['email-hash']\n\n if player['name'] is not None:\n playerTournamentName = player['name']\n else:\n playerTournamentName = player['challonge-username']\n\n c.execute(\"SELECT id FROM players WHERE id='%s'\" % id)\n row = c.fetchone()\n if row is None:\n newPlayerRecord = (player['email-hash'],\n playerTournamentName,\n _newPlayerRating)\n c.execute(\"INSERT INTO players VALUES('%s','%s','%s')\" %\n newPlayerRecord)\n else:\n c.execute(\"SELECT nick FROM players WHERE id='%s'\" % id)\n storedName = c.fetchone()[0]\n if storedName != playerTournamentName:\n c.execute(\"SELECT alias FROM aliases WHERE player_id='%s'\" % id)\n if c.fetchone() is None:\n c.execute(\"INSERT INTO aliases VALUES('%s','%s')\" %\n (playerTournamentName, id))",
"def player_changename(event_var):\r\n debug.write(\"[SourceRPG] Handling player_changename\", 1)\r\n players[event_var['userid']]['name'] = event_var['newname']\r\n debug.write(\"[SourceRPG] player_changename handled\", 1)",
"def change_username(self, name):\n self.username = name",
"def set_name(self):\n player1 = input('Enter a name for player 1: ')\n self._players.append(player1)\n player2 = input('Enter a name for player 2: ')\n self._players.append(player2)\n print()\n return self._players",
"def set_name(self, name):\n\t\tself.name_ = name",
"def __setPlayerFilename(self):\n if self.__playerName != \"???\":\n l=self.__playerName.rsplit(\" \")\n nameWithoutSpaces=\"_\".join(l)\n self.__filename = fileLocs.playerProfiles+\"\\\\\"+nameWithoutSpaces+r\".p\"",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def set_player(self, player):\n\n self._player = player",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def enter_name(self, name):\n self.name = name",
"def set_name(self, _name):\n self.name = _name",
"def set_character_name(self, character_name):\n self.name = character_name",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def set_name(self, name):\n self.name = name",
"def set_name(self, name):\n self.name = name",
"def set_name(self, name):\n self.name = name",
"def set_name(self, name):\n self.name = name",
"def set_name(self, name):\n self.name = name",
"def received_NAME(self, message=None):\n\n\t\tself.player_client.send_message(self.player_model.player.name)",
"def set_name(self, room_name):\n self.name = room_name"
] |
[
"0.7192527",
"0.70124376",
"0.67124784",
"0.6590799",
"0.6398148",
"0.6380828",
"0.63518584",
"0.63177353",
"0.6313776",
"0.62537956",
"0.6250535",
"0.6231284",
"0.6177",
"0.61729723",
"0.6132019",
"0.6120752",
"0.6120752",
"0.6052022",
"0.60450685",
"0.6041063",
"0.59877247",
"0.59749705",
"0.59672415",
"0.59653735",
"0.59653735",
"0.59653735",
"0.59653735",
"0.59653735",
"0.596439",
"0.5959141"
] |
0.9132155
|
0
|
Sets the player_b_name of this Game.
|
def player_b_name(self, player_b_name):
self._player_b_name = player_b_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name",
"def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def bcp_player_variable(self, name, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player[name] = value",
"def update_players_name(self, player_1, player_2):\n self.model.player_1 = player_1\n self.model.player_2 = player_2\n self.logger.info(\"User_1 has name %s, user_2 has name %s\", player_1, player_2)",
"def update_hobby_name(self, curr_hobby, new_name):\r\n for hobby in self.__hobbies:\r\n if hobby == curr_hobby:\r\n hobby.update_name(new_name)",
"def received_NAME(self, message=None):\n\n\t\tself.player_client.send_message(self.player_model.player.name)",
"def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points",
"def setName(self, *args):\n return _libsbml.FluxBound_setName(self, *args)",
"def setNewPlayerName(self, playerName):\n if(self.__playerName==\"???\" and not(self.__playerAlreadyExists(playerName)) ):\n #verify that there isn't already a player by this name\n fileLocs=FileLocations()\n filename = fileLocs.playerProfiles+r\"\\profiles.p\"\n playerList = []\n index = -1\n f = None\n try:\n #load list of players -- if there's no file, then we skip this step \n f = open(filename, \"r\")\n playerList = pickle.load(f)\n f.close() \n except IOError: \n pass\n finally:\n #add name to list\n self.__playerName=playerName\n playerList.append(playerName)\n try:\n f = open(filename,\"w\")\n pickle.dump(playerList, f)\n f.close()\n self.savePlayerInfo()\n except IOError:\n raise PlayerIOError(\"Unable to add player name to profile.p\")\n return True\n else:\n return False ##unsuccessful -- return false",
"def player_changename(event_var):\r\n debug.write(\"[SourceRPG] Handling player_changename\", 1)\r\n players[event_var['userid']]['name'] = event_var['newname']\r\n debug.write(\"[SourceRPG] player_changename handled\", 1)",
"def set_name(self, name):\n\t\tself.name_ = name",
"def __setPlayerFilename(self):\n if self.__playerName != \"???\":\n l=self.__playerName.rsplit(\" \")\n nameWithoutSpaces=\"_\".join(l)\n self.__filename = fileLocs.playerProfiles+\"\\\\\"+nameWithoutSpaces+r\".p\"",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def name_bank(self, new_bank_name: str):\n if self.is_bank_name_valid(new_bank_name):\n self.bank_name = new_bank_name\n else:\n raise ValueError(\"Bank name is not valid!\")",
"def set_name(self, _name):\n self.name = _name",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def nome_bandeira(self, nome_bandeira):\n self._nome_bandeira = nome_bandeira",
"def team_name(self, team_name):\n\n self._team_name = team_name",
"def new_name(self,new_name):\n self.name = new_name",
"def __updatePlayerName(db, player):\n c = db.cursor()\n id = player['email-hash']\n\n if player['name'] is not None:\n playerTournamentName = player['name']\n else:\n playerTournamentName = player['challonge-username']\n\n c.execute(\"SELECT id FROM players WHERE id='%s'\" % id)\n row = c.fetchone()\n if row is None:\n newPlayerRecord = (player['email-hash'],\n playerTournamentName,\n _newPlayerRating)\n c.execute(\"INSERT INTO players VALUES('%s','%s','%s')\" %\n newPlayerRecord)\n else:\n c.execute(\"SELECT nick FROM players WHERE id='%s'\" % id)\n storedName = c.fetchone()[0]\n if storedName != playerTournamentName:\n c.execute(\"SELECT alias FROM aliases WHERE player_id='%s'\" % id)\n if c.fetchone() is None:\n c.execute(\"INSERT INTO aliases VALUES('%s','%s')\" %\n (playerTournamentName, id))",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def get_player_name(self):\n if self.name_entered is False:\n self.name = self.input_name(\"Please enter your name:\")\n self.name_entered = True\n self.score_file()",
"def updateName(self,name):\n self.name = name",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def set_name(self, name):\n self.name = name # overwrite the existing name with the input name",
"def setName(self, name):\n self.name = str(name)"
] |
[
"0.7046953",
"0.68283844",
"0.6823286",
"0.60431397",
"0.602946",
"0.5915856",
"0.5797274",
"0.57551175",
"0.564356",
"0.5617421",
"0.5611178",
"0.5594154",
"0.5562194",
"0.55580914",
"0.55133885",
"0.54521626",
"0.543514",
"0.5420806",
"0.5417903",
"0.5417903",
"0.5415289",
"0.5399954",
"0.5396593",
"0.53883106",
"0.5377617",
"0.5360032",
"0.5355609",
"0.53521335",
"0.53521335",
"0.53468704"
] |
0.9181073
|
0
|
Sets the player_a_id of this Game.
|
def player_a_id(self, player_a_id):
self._player_a_id = player_a_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_player_id(self, player_id):\n self.player_id = player_id",
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def set_id(self, player_id):\n pass",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def setA(self, a):\n\t\tself.a = int(a)",
"def player_a_points(self, player_a_points):\n\n self._player_a_points = player_a_points",
"def set_player(self, player):\n\n self._player = player",
"def set_player(self, new_player):\n self.player = new_player",
"def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating",
"def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...",
"def a(self, a):\n\n self._a = a",
"def set_player(self, char_data):\n self.player = self.server.object_manager.add_player(char_data)",
"def fantasy_alarm_player_id(self, fantasy_alarm_player_id):\n\n self._fantasy_alarm_player_id = fantasy_alarm_player_id",
"def set_player(self, player_params):\n\n self.saved_player = player_params",
"def rotoworld_player_id(self, rotoworld_player_id):\n\n self._rotoworld_player_id = rotoworld_player_id",
"def assisted_by_player_id1(self, assisted_by_player_id1):\n\n self._assisted_by_player_id1 = assisted_by_player_id1",
"def set_api_id(self, player_api_id, persist=True):\n self.player_api_id = player_api_id\n if persist:\n update(self)",
"def set_game_id(self, value: str) -> None:\n self.game_name_entry.delete(0, len(self.game_name_entry.get()))\n self.game_name_entry.insert(0, value)",
"def node_a(self, node_a):\n\n self._node_a = node_a",
"def setCurrentPlayer(self):\n self.current_player = next(self.get_player_order_fn())",
"def check_player_id(self):\n if self.player_id == 'me':\n profile = self.profile\n self.player_id = profile['id']",
"def get_player_id(self):\n\n return self.player_id",
"def associate_with_sessionplayer(self,\n sessionplayer: ba.SessionPlayer) -> None:\n self._sessionteam = weakref.ref(sessionplayer.sessionteam)\n self.character = sessionplayer.character\n self._last_sessionplayer = sessionplayer\n self._sessionplayer = sessionplayer\n self.streak = 0"
] |
[
"0.69736564",
"0.69562083",
"0.6944866",
"0.685115",
"0.685115",
"0.67752093",
"0.6499893",
"0.6387207",
"0.61095905",
"0.5931413",
"0.5756862",
"0.5719053",
"0.5642285",
"0.5627128",
"0.5627128",
"0.5627128",
"0.55055493",
"0.5500313",
"0.5443958",
"0.544141",
"0.5326835",
"0.5310366",
"0.5263031",
"0.5248024",
"0.5141491",
"0.5066945",
"0.50504035",
"0.5031155",
"0.50236684",
"0.50234467"
] |
0.8947233
|
0
|
Sets the player_b_id of this Game.
|
def player_b_id(self, player_b_id):
self._player_b_id = player_b_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games",
"def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name",
"def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id",
"def set_player_id(self, player_id):\n self.player_id = player_id",
"def set_id(self, player_id):\n pass",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def setB(self, b):\n\t\tself.b = int(b)",
"def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points",
"def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating",
"def set_bid(self, bid):\n self.__bid = bid",
"def pb_id(self, pb_id: str):\n # FIXME(BMo) instead of creating the object to check if the PB exists\n # use a method on PB List?\n # ProcessingBlock(pb_id)\n self.set_state(DevState.ON)\n self._pb_id = pb_id",
"def b(self, b):\n\n self._b = b",
"def id_bandeira(self, id_bandeira):\n self._id_bandeira = id_bandeira",
"def add_bid(self, bid, player_id):\n\t\tglobal_id = self.globalize_id(player_id)\n\t\tassert len(self.bids) < self.data_size and global_id not in self.bids\n\t\tif bid == 0:\n\t\t\tbid = \"N\"\n\t\tself.bids[global_id] = bid",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"def game_id(self, game_id):\n\n self._game_id = game_id",
"def roto_wire_player_id(self, roto_wire_player_id):\n\n self._roto_wire_player_id = roto_wire_player_id",
"def node_b(self, node_b):\n\n self._node_b = node_b",
"def set_player(self, new_player):\n self.player = new_player",
"def set_player(self, player):\n\n self._player = player",
"def set_api_id(self, player_api_id, persist=True):\n self.player_api_id = player_api_id\n if persist:\n update(self)",
"def set_player(self, player_params):\n\n self.saved_player = player_params",
"def rotoworld_player_id(self, rotoworld_player_id):\n\n self._rotoworld_player_id = rotoworld_player_id",
"def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment",
"def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]",
"def bid_count(self, bid_count):\n\n self._bid_count = bid_count",
"def sportsbook_id(self, sportsbook_id):\n\n self._sportsbook_id = sportsbook_id",
"def set_game_id(self, value: str) -> None:\n self.game_name_entry.delete(0, len(self.game_name_entry.get()))\n self.game_name_entry.insert(0, value)"
] |
[
"0.701637",
"0.68415976",
"0.6529189",
"0.6521133",
"0.6509748",
"0.63791966",
"0.63791966",
"0.63403463",
"0.61362946",
"0.609142",
"0.57009405",
"0.56523895",
"0.5555329",
"0.55444074",
"0.54241943",
"0.54158646",
"0.54158646",
"0.54158646",
"0.5270569",
"0.5130478",
"0.51274794",
"0.5119155",
"0.5099889",
"0.5066116",
"0.5036218",
"0.50175005",
"0.49969223",
"0.49933052",
"0.49742216",
"0.49567193"
] |
0.8964314
|
0
|
Sets the player_a_points of this Game.
|
def player_a_points(self, player_a_points):
self._player_a_points = player_a_points
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def give_points(id_player: str, id_tournament: str, points: float):\n player = Player.get(id_player)\n if player:\n player.set_points(id_tournament, points)",
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def assign_points(players):\n pass",
"def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points",
"def set_points(self):\n self.white_player.play_against(self.black_player)\n self.black_player.play_against(self.white_player)\n\n if self.ended:\n if self.winner is not None:\n self.winner.points += 1\n else:\n self.white_player.points += 0.5\n self.black_player.points += 0.5",
"def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id",
"def assessment_points(self, assessment_points: int):\n\n self._assessment_points = assessment_points",
"def addPoints(self, points):\r\n self.points = points",
"def setPoints(self, value):\n assert type(value) in [int, float]\n self._points = value",
"def display_player_points():\r\n pass",
"def setPointsToTurn(self):\r\n for point in self.points:\r\n point.setActiveTurn()",
"def assign_points(self, dice):\n if not self.assigned:\n self._assigned = True\n self._points = self._calculate_points(dice)",
"def gain_point(self, points):\n self.points += points\n print(f\"Yay! You have gained {points} point(s)! That means you now have {self.points} points!\")",
"def getpoints(self, player):\n return self.Points[player]",
"def pts(self, pts):\n\n self._pts = pts",
"def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name",
"def set_score(self, points):\n self.score += points",
"def set_player(self, player):\n\n self._player = player",
"def change_points(self, dp):\n\t\tself._points += dp",
"def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating",
"def SetPoints(self, pts):\n from ..vtkCommonCore import vtkPoints\n if isinstance(pts, vtkPoints):\n p = pts\n else:\n pts = numpyTovtkDataArray(pts)\n p = vtkPoints()\n p.SetData(pts)\n self.VTKObject.SetPoints(p)",
"def update_points(self, correct):\n\n if correct:\n self.points += 10\n \n if self.points > ((self.current_level + 1) * 100):\n self.play_sound('level_up', self.standard_sfx, True)\n self.play_sound(choice(self.correct_voice),self.standard_voice, wait=True)\n self.play_sound('combinations',self.game_sounds, wait=True)\n self.current_level += 1\n print(self.current_level)\n if self.current_level > 4:\n self.current_level = 4",
"def assign_value(self, points):\n \n self.value = int(points)",
"def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()",
"def set_player(self, new_player):\n self.player = new_player",
"def add_points(self, points):\n pass",
"def point_assigner_win(self, group, player_sprites):\n if group != {}:\n for player in player_sprites:\n player.points += 1",
"def update_score(self, score_point: int):\r\n self._score_point = score_point\r\n self._update_score() # change the visual display of points for the player\r",
"def setPointsActive(self):\r\n for point in self.points:\r\n point.setActive()",
"def set_points(self, points=None, method=None, **kwargs):\n if points is None:\n if not hasattr(self, 'method'):\n if method is not None:\n self.set_method(method)\n else:\n raise ValueError(\"Invalid arguments. Please input points, or set the IDI method first.\")\n self.method.get_points(self, **kwargs) # get_points sets the attribute video.points \n else:\n self.points = np.asarray(points)"
] |
[
"0.70405376",
"0.6953009",
"0.69266933",
"0.6847564",
"0.6813382",
"0.63882875",
"0.60412383",
"0.59575623",
"0.5922945",
"0.59009165",
"0.58660984",
"0.5815809",
"0.5809744",
"0.57961416",
"0.5774984",
"0.57434326",
"0.5727533",
"0.559885",
"0.55913925",
"0.55802286",
"0.55321527",
"0.5519176",
"0.549984",
"0.54920226",
"0.54725885",
"0.54708105",
"0.54679763",
"0.5454468",
"0.5444649",
"0.5440933"
] |
0.90386194
|
0
|
Sets the player_b_points of this Game.
|
def player_b_points(self, player_b_points):
self._player_b_points = player_b_points
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games",
"def player_a_points(self, player_a_points):\n\n self._player_a_points = player_a_points",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def set_points(self):\n self.white_player.play_against(self.black_player)\n self.black_player.play_against(self.white_player)\n\n if self.ended:\n if self.winner is not None:\n self.winner.points += 1\n else:\n self.white_player.points += 0.5\n self.black_player.points += 0.5",
"def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating",
"def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name",
"def setB(self, b):\n\t\tself.b = int(b)",
"def assign_points(players):\n pass",
"def give_points(id_player: str, id_tournament: str, points: float):\n player = Player.get(id_player)\n if player:\n player.set_points(id_tournament, points)",
"def setPoints(self, xa, ya, xb, yb):\n self._origin = QPointF(xa, ya)\n self._end = QPointF(xb, yb)",
"def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment",
"def setPoints(self, value):\n assert type(value) in [int, float]\n self._points = value",
"def set_points(self, pts):\n\n self.minX = sys.maxint\n self.minY = sys.maxint\n self.maxX = sys.maxint * -1\n self.maxY = sys.maxint * -1\n\n self.points = []\n #self.mbr = Rect()\n for p in pts:\n x,y = p\n\n if x < self.minX:\n self.minX = x\n if x > self.maxX:\n self.maxX = x\n if y < self.minY:\n self.minY = y\n if y > self.maxY:\n self.maxY = y\n\n self.points.append(Point(x,y))\n\n self.mbr = Rect(Point(self.minX,self.minY),Point(self.maxX,self.maxY))",
"def setBallPos(self, x: int, y: int):\n self.x = x\n self.y = y",
"def b(self, b):\n\n self._b = b",
"def gain_point(self, points):\n self.points += points\n print(f\"Yay! You have gained {points} point(s)! That means you now have {self.points} points!\")",
"def display_player_points():\r\n pass",
"def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()",
"def addPoints(self, points):\r\n self.points = points",
"def _onSetParameterB(self, value):\n self._parameters['b'] = min(max(value, self._parameters['lower']), self._parameters['upper']) # Limit at upper and lower\n self._logger.info(\"Parameter ba' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())",
"def assign_value(self, points):\n \n self.value = int(points)",
"def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()",
"def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)",
"def bcp_set(self, **kwargs):\n pass",
"def update(self):\n self.bpos_x += 3",
"def set_points(self):\n for p in range(len(self.points)):\n self.points[p] = self.points[p] + self.speeds[p]\n if self.points[p].x > SCREEN_DIM[0] or self.points[p].x < 0:\n self.speeds[p].x = -self.speeds[p].x\n if self.points[p].y > SCREEN_DIM[1] or self.points[p].y < 0:\n self.speeds[p].y = -self.speeds[p].y",
"def _updateCollisions(self):\n if self._paddle.collision(self._ball) and self._ball.getVY() <= 0:\n self._points -= PADDLE_POINTS\n self._ball.bouncePaddle(self._paddle)\n # increment _ball._bounces\n self._ball.setBounces(self._ball.getBounces()+1)\n # change speed\n if (self._ball.getBounces() >= BOUNCES_FOR_INCREASE and\n self._ball.findSpeed() <= SPEED_MAX):\n self._ball.incrementSpeed()\n self._paddle.incrementSpeed()\n self._ball.setBounces(0)\n \n stop = False\n for r in range(len(self._bricks)):\n if not stop:\n if self._bricks[r].collision(self._ball):\n self._ball.bounceTopBottom()\n self.addPointsBrick(self._bricks[r])\n if self._determine_new_fp():\n self.createFP(self._bricks[r])\n del self._bricks[r]\n stop = True\n \n for j in self._FP_list:\n if j.collision(self._paddle):\n self._points_FP = j.getValue()",
"def setPointsToTurn(self):\r\n for point in self.points:\r\n point.setActiveTurn()",
"def percent_b(self, percent_b: float):\n\n self._percent_b = percent_b",
"def addPointsBrick(self, brick):\n assert isinstance(brick, Brick)\n if brick.getFillcolor() == FIRST_BRICK_COLOR: # blue brick\n self._points += BRICK_POINTS1\n elif brick.getFillcolor() == SECOND_BRICK_COLOR: # green brick\n self._points += BRICK_POINTS2\n elif brick.getFillcolor() == THIRD_BRICK_COLOR: # yellow brick\n self._points += BRICK_POINTS3\n elif brick.getFillcolor() == FOURTH_BRICK_COLOR: # orange brick\n self._points += BRICK_POINTS4\n elif brick.getFillcolor() == FIFTH_BRICK_COLOR: # red brick\n self._points += BRICK_POINTS5"
] |
[
"0.7156698",
"0.66683024",
"0.6464138",
"0.634216",
"0.62249166",
"0.59966195",
"0.593351",
"0.58669984",
"0.562444",
"0.5574016",
"0.54878134",
"0.54007715",
"0.53405094",
"0.5217778",
"0.51687044",
"0.51381606",
"0.51173633",
"0.5097935",
"0.5085429",
"0.5070251",
"0.5038144",
"0.5036724",
"0.5004232",
"0.50024927",
"0.49924472",
"0.49788797",
"0.4975109",
"0.49693057",
"0.49632403",
"0.4954311"
] |
0.911463
|
0
|
Sets the player_a_games of this Game.
|
def player_a_games(self, player_a_games):
self._player_a_games = player_a_games
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games",
"def games(self, games):\n\n self._games = games",
"def games_played(self, games_played):\n\n self._games_played = games_played",
"def games_played(self, games_played):\n\n self._games_played = games_played",
"def player_a_points(self, player_a_points):\n\n self._player_a_points = player_a_points",
"def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id",
"def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name",
"def add_games(self, games):\n for game in games:\n # TODO(kevin): Use Benchapp opponent ID rather than creating a new\n # opponent for every game.\n response = self._session.post(self._add_game_url,\n data={\n 'type': 'GAME',\n 'subType': 'PLAYOFF' if game.playoffs else 'REGULAR',\n 'opponentID': 0,\n 'newTeamName': game.opponent,\n 'homeAway': 'Home' if game.is_home else 'Away',\n 'dateValue': str(game.time.date()),\n 'hr': game.time.strftime('%I'),\n 'min': game.time.strftime('%M'),\n 'am-pm': game.time.strftime('%p'),\n 'duration_hrs': \"1\",\n 'location': game.location\n })\n response.raise_for_status()",
"def set_league(teams, l):\n\t\n\t#~ print teams[0].name\n\t\n\tif l>0 and l<10:\n\t\tfor team in teams:\n\t\t\tteam.league = l\n\t\t\n\telse:\n\t\tprint(\"Error! Only leagues between 1 and 9 can be set.\")\n\t\n\treturn teams",
"def setup_players(self, players):\n\t\tself.players.clear()\n\t\tids = set([p.get_player_id() for p in players])\n\t\tfor p in self.state.get_players():\n\t\t\tif p not in ids:\n\t\t\t\traise PlayerException(p)\n\t\tfor p in players:\n\t\t\tself.players[p.get_player_id()] = p",
"def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def create_teams(a_season):\n\n team_a = Team(name=\"Team A\", season=a_season)\n team_a.save()\n for p in create_players(7, 0):\n team_a.players.add(p)\n team_a.save()\n team_b = Team(name=\"Team B\", season=a_season)\n team_b.save()\n for p in create_players(7, 16):\n team_b.players.add(p)\n team_b.save()",
"def max_players(self, max_players):\n\n self._max_players = max_players",
"def set_plays(self) -> None:\n player1 = self._get_input('What is the name of player 1?')\n player2 = self._get_input('What is the name of player 2?')\n self.state = State(player1, player2)",
"def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()",
"def update_scores(self, AI_win):\n self.games_played += 1\n if not AI_win:\n self.games_won += 1\n else:\n self.games_lost += 1",
"def __init__(self, players):\n\n self._players = players\n self._game = None",
"def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]",
"async def fill_game_list(self):\n games = await self.fetch_games()\n Game.MAPPING = games",
"def resetPlayerBetAmount(self, players):\n\t\tfor x in players:\n\t\t\tx.betAmount = []",
"def startGame(self):\n\n\t\tfor name in self.players.keys():\n\t\t\tself.startPlayerGame((name, 0))\n\t\tself.setupGuiSignals()",
"def set_assignment_game_frame(self, assignment):\n\n self.frames[\"game\"].set_assignment(assignment)",
"async def _reset_games(self, ctx):\n data = await self.get_data(ctx)\n await data.Games.clear()\n msg = (\"{0.name} ({0.id}) restored casino games to \"\n \"default settings.\").format(ctx.author)\n await ctx.send(msg)",
"def goals_per_game(self, goals_per_game):\n\n self._goals_per_game = goals_per_game",
"def test_play_game_hard(self):\r\n wins = [0,0,0]\r\n\r\n for i in range(1,10):\r\n a_player_1_id = 1\r\n a_player_2_id = 2\r\n a_players = [RandomPlayer(a_player_1_id), RandomPlayer(a_player_2_id)]\r\n a_x_dist = i\r\n a_y_dist = i\r\n a_num_to_win = 3\r\n a_game = Game(a_players,a_x_dist,a_y_dist,a_num_to_win)\r\n a_game.play_game()\r\n\r\n wins[a_game.winner] += 1\r\n\r\n print(wins)",
"def populate_games():\n global games\n games[0] = SnakeGame(lp)",
"def set_player(self, new_player):\n self.player = new_player",
"def get_player_states(self, players):\n for player in self.players.values():\n p = players.add()\n p.id = player.id\n p.pos.CopyFrom(player.pos.to_protocol())",
"def play_multiple_games(players, num_games=10, seed=2):\n total_games_winners = {}\n for player in players:\n if player.name not in total_games_winners:\n total_games_winners[player.name] = 0\n random.seed(seed)\n for game in range(num_games):\n print('-------- Game', game, '--------')\n random.shuffle(players)\n print('Initial game positions: ', players, '\\n')\n if all(x > 1 for x in [p.amount for p in players]):\n rotation_winners = play_multiple_rotations(players)\n for player_name in total_games_winners:\n total_games_winners[player_name] += rotation_winners[player_name]\n print()\n # print('Final Win Count: ', total_games_winners)\n print(players)"
] |
[
"0.70214266",
"0.6884233",
"0.62673855",
"0.62673855",
"0.6084447",
"0.6069749",
"0.5723313",
"0.5631387",
"0.52389956",
"0.5235685",
"0.5232058",
"0.5165864",
"0.50988805",
"0.5088275",
"0.5049184",
"0.504764",
"0.5032063",
"0.5024373",
"0.50055563",
"0.50052094",
"0.49983767",
"0.49864033",
"0.49846697",
"0.49491394",
"0.49480706",
"0.49410564",
"0.4934205",
"0.49213168",
"0.49188283",
"0.4914307"
] |
0.8954244
|
0
|
Sets the player_b_games of this Game.
|
def player_b_games(self, player_b_games):
self._player_b_games = player_b_games
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def games(self, games):\n\n self._games = games",
"def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name",
"def games_played(self, games_played):\n\n self._games_played = games_played",
"def games_played(self, games_played):\n\n self._games_played = games_played",
"def set_blists(self, blists):\n self.blists = blists[:]",
"def setB(self, b):\n\t\tself.b = int(b)",
"def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating",
"def set_game_state(self,winner):\r\n if winner == 'b':\r\n self._game_state = \"BLACK_WON\"\r\n else:\r\n self._game_state = \"RED_WON\"",
"def commit_games_to_db(self, games):\n print ' '\n \n num_games = len(games)\n game_num = 0\n \n for game in games:\n update_progress(game_num, num_games)\n game_num += 1\n self.add_game(game)\n self.add_stats(game)",
"def set_league(teams, l):\n\t\n\t#~ print teams[0].name\n\t\n\tif l>0 and l<10:\n\t\tfor team in teams:\n\t\t\tteam.league = l\n\t\t\n\telse:\n\t\tprint(\"Error! Only leagues between 1 and 9 can be set.\")\n\t\n\treturn teams",
"def resetPlayerBetAmount(self, players):\n\t\tfor x in players:\n\t\t\tx.betAmount = []",
"def add_games(self, games):\n for game in games:\n # TODO(kevin): Use Benchapp opponent ID rather than creating a new\n # opponent for every game.\n response = self._session.post(self._add_game_url,\n data={\n 'type': 'GAME',\n 'subType': 'PLAYOFF' if game.playoffs else 'REGULAR',\n 'opponentID': 0,\n 'newTeamName': game.opponent,\n 'homeAway': 'Home' if game.is_home else 'Away',\n 'dateValue': str(game.time.date()),\n 'hr': game.time.strftime('%I'),\n 'min': game.time.strftime('%M'),\n 'am-pm': game.time.strftime('%p'),\n 'duration_hrs': \"1\",\n 'location': game.location\n })\n response.raise_for_status()",
"def betting_market_splits(self, betting_market_splits):\n\n self._betting_market_splits = betting_market_splits",
"def set_game_state(self, game_state):\n\n self._game_state = game_state",
"def set_plays(self) -> None:\n player1 = self._get_input('What is the name of player 1?')\n player2 = self._get_input('What is the name of player 2?')\n self.state = State(player1, player2)",
"def max_players(self, max_players):\n\n self._max_players = max_players",
"def __init__(self, players):\n\n self._players = players\n self._game = None",
"def set_player_wins(self, player, wins):\n # Get the current time stamp\n time_stamp = time()\n\n # Is this a new winner?\n if player.unique_id not in self:\n\n # Add the new winner to the database\n self.cursor.execute(\n 'INSERT INTO gungame_winners (name, unique_id, wins, '\n 'time_stamp, last_win) VALUES(?, ?, ?, ?, ?)',\n (player.name, player.unique_id, 0, time_stamp, time_stamp)\n )\n\n # Get the winner's instance\n instance = self[player.unique_id]\n\n # Set the values for the instance\n instance.name = player.name\n instance.wins = wins\n instance.time_stamp = time_stamp\n instance.last_win = time_stamp\n\n # Update the winner's values in the database\n self.cursor.execute(\n 'UPDATE gungame_winners SET name=?, time_stamp=?, '\n 'wins=?, last_win=? WHERE unique_id=?', (\n player.name, instance.time_stamp, instance.wins,\n instance.last_win, player.unique_id,\n )\n )\n\n # Commit the changes to the database\n self.connection.commit()",
"def blind_bet(self):\n self.this_player.bet(SMALL_BLIND_BET)\n self.other_player.bet(BIG_BLIND_BET)\n if SHOW_MESSAGE:\n print(\"Making blind bets.\")\n print(\"Player1:\")\n self.player1.show()\n print(\"Player2:\")\n self.player2.show()",
"def evaluate_against_bot(self, opponent_bot, num_games, \n num_white_pieces = None, \n num_black_pieces = None,\n max_num_of_turns = 1000):\n zero_bot_player = 1\n score = 0\n num_games_won_as_black = 0\n num_games_won_as_white = 0\n \n # Play 'num_games' games of brandubh\n for i in range(num_games):\n print('\\rPlaying game {0}, score: w = {1}, b = {2}.'.format(i, \n num_games_won_as_white, num_games_won_as_black),end='')\n \n # If a maximum number of white or black pieces is given, then\n # use a random starting position for the game.\n if num_white_pieces or num_black_pieces:\n starting_board = random_starting_position(num_white_pieces, \n num_black_pieces)\n game = GameState.new_game(starting_board)\n else:\n game = GameState.new_game()\n \n # Get both bots to play a game of brandubh.\n turns_taken = 0\n while game.is_not_over() and turns_taken < max_num_of_turns:\n if game.player == zero_bot_player:\n action = self.select_move(game)\n else:\n action = opponent_bot.select_move(game) \n game.take_turn_with_no_checks(action)\n turns_taken += 1\n \n \n # At the end of the game, increment counts keeping track of how\n # many games the current bot won against the opponent bot and \n # get the bots to switch sides for the next game.\n if turns_taken < max_num_of_turns:\n score += zero_bot_player*game.winner\n if zero_bot_player == game.winner:\n if zero_bot_player == 1:\n num_games_won_as_white += 1\n else:\n num_games_won_as_black += 1 \n zero_bot_player *= -1\n \n else:\n score -= 1\n zero_bot_player *= -1\n \n print(' done.')\n # Return the evaluation score of the bot along with fraction of games\n # won as black/white, the total number of games and the number of\n # epochs the bot has trained for before being evaluated.\n return [score/num_games, 2*num_games_won_as_white/num_games,\n 2*num_games_won_as_black/num_games, \n num_games, len(self.loss_history)]",
"def updateGameState(self) -> None:\n\n # movement updates of players and ball\n self.ball.move()\n self.player_one.move()\n if self.hasComputerPlayer:\n self.player_two.computer_move(self.ball)\n else:\n self.player_two.move()\n\n # collision handling of the ball\n if self.ball.didCollideWithPlayer(self.player_one) or self.ball.didCollideWithPlayer(self.player_two):\n if self.ball.flip_velocity(mode=\"x\"): # flip velocity vector on x-axis\n self.playSound(\"player_collision\", 0.6)\n\n # change the sensitivity and speed of the computer player every bounce to make the game more interesting\n if self.hasComputerPlayer:\n self.player_two.setRandomSensitivitySpeed()\n\n if self.ball.did_collide_top_bottom():\n self.ball.flip_velocity(mode=\"y\") # flip velocity vector on y-axis\n self.playSound(\"wall_collision\")\n\n # determine winner of the round\n round_winner = self.ball.determine_round_winner()\n if round_winner:\n\n self.playSound(\"fail\", 0.3)\n\n if round_winner == 1:\n # increase player one score\n self.updateScore(1)\n # determine, if the player has won 5 rounds\n if self._score[0] == 5:\n self.startGameOverScreen(1)\n elif round_winner == 2:\n # increase player two score\n self.updateScore(2)\n # determine, if the player has won 5 rounds\n if self._score[1] == 5:\n self.startGameOverScreen(2)\n\n self.ball.reset() # reset the velocity and position of the ball",
"def setBoard(self, board):\n\t\tself.gameBoard = board",
"def bcp_player_variable(self, name, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player[name] = value",
"def set_game_params(self, board):\n self.board = board.copy()\n self.n_rows = len(self.board[0]) # cols number\n self.n_cols = len(self.board) # rows number\n self.fruits_ttl = min(self.n_rows,self.n_cols)+1\n player_pos = np.where(board == 1)\n rival_pos = np.where(board == 2)\n self.locations[PLAYER] = tuple(ax[0] for ax in player_pos)\n self.locations[RIVAL] = tuple(ax[0] for ax in rival_pos)\n self.turns = 0\n self.max_turns = reachables(self.board,self.locations[PLAYER])\n self.player_turns = self.max_turns // 2",
"def reset_battles(self) -> None:\n for battle in list(self._battles.values()):\n if not battle.finished:\n raise EnvironmentError(\n \"Can not reset player's battles while they are still running\"\n )\n self._battles = {}",
"def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)",
"def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)"
] |
[
"0.6660215",
"0.61440706",
"0.61138666",
"0.5897767",
"0.5834286",
"0.55333114",
"0.55333114",
"0.53141403",
"0.51146567",
"0.5081129",
"0.5071948",
"0.5016322",
"0.4920143",
"0.49178362",
"0.48985335",
"0.48405293",
"0.48397624",
"0.47612613",
"0.4701193",
"0.468332",
"0.46759504",
"0.46755117",
"0.46717194",
"0.4665667",
"0.4664662",
"0.46551841",
"0.4618558",
"0.4608601",
"0.4607961",
"0.45943344"
] |
0.9111923
|
0
|
Sets the player_a_rating of this Game.
|
def player_a_rating(self, player_a_rating):
self._player_a_rating = player_a_rating
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_a_rating_adjustment(self, player_a_rating_adjustment):\n\n self._player_a_rating_adjustment = player_a_rating_adjustment",
"def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating",
"def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass",
"def a_rate(self, a_rate):\n\n self._a_rate = a_rate",
"def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._rating = rating",
"def player_a_id(self, player_a_id):\n\n self._player_a_id = player_a_id",
"def user_rating(self, rating: int):\n if type(rating) == int:\n self._user_rating = rating\n if type(rating) != int:\n raise ValueError\n if rating < 0 or rating > 5:\n print('Rating is out of 5. Please enter an integer from 0 to 5.')\n self._user_rating = None",
"def setA(self, a):\n\t\tself.a = int(a)",
"def ratings(self, ratings):\n\n self._ratings = ratings",
"def ratings(self, ratings):\n\n self._ratings = ratings",
"def player_a_name(self, player_a_name):\n\n self._player_a_name = player_a_name",
"def objective_player_score(self, objective_player_score):\n\n self._objective_player_score = objective_player_score",
"def set_player(self, player):\n\n self._player = player",
"def player_a_games(self, player_a_games):\n\n self._player_a_games = player_a_games",
"def combat_player_score(self, combat_player_score):\n\n self._combat_player_score = combat_player_score",
"def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return",
"def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment",
"def updateScore(self, player: int) -> None:\n\n if player == 1:\n self._score[0] += 1\n elif player == 2:\n self._score[1] += 1\n\n # logging\n logger.info(\"Player {winner} has scored a goal. Score: {score}\", winner=player, score=str(self._score))",
"def player_a_points(self, player_a_points):\n\n self._player_a_points = player_a_points",
"def add_rating(self, rating):\n if rating >= 0 and rating <= 4:\n self.ratings.append(rating)\n else:\n print(\"Invalid Rating\")",
"def set_score(self, a, b, score):\n ### FILL IN ###",
"def __playerRating(player, db):\n key = player['id']\n if key not in __ratingCache:\n c = db.cursor()\n c.execute(\"SELECT rating FROM PLAYERS WHERE id='%s'\" %\n player['email-hash'])\n rating = c.fetchone()[0]\n __ratingCache[key] = rating\n else:\n rating = __ratingCache[key]\n\n return rating",
"def set_score(self,score):\n self._score = score",
"def rating_id(self, rating_id: int):\n\n self._rating_id = rating_id",
"def set_score(self, score):\n self._score = score",
"def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']",
"def set_player(self, new_player):\n self.player = new_player",
"def rating_date(self, rating_date):\n\n self._rating_date = rating_date",
"def update_rating(self, new_rating: float, date: Union[str, float]):\n self.logger.info(f\"Updating rating for {self.id}: {self.rating:.3f} --> {new_rating:.3f}\")\n self.rating = new_rating\n self._update_rating_history(rating=new_rating, date=date)",
"def add_rating(self, rating):\n if not rating or rating < 0 or rating > 4:\n return \"Rating {rating} is not valid. Valid ratings are between 0 and 4\".format(rating=rating)\n else:\n self.ratings.append(rating)"
] |
[
"0.7204794",
"0.700881",
"0.66364235",
"0.64302677",
"0.61229753",
"0.60579",
"0.58704174",
"0.5863217",
"0.58583367",
"0.58583367",
"0.56981266",
"0.5688521",
"0.5648215",
"0.5619677",
"0.55893576",
"0.55828285",
"0.55657667",
"0.5524322",
"0.55239767",
"0.54368836",
"0.5390374",
"0.5382897",
"0.5361584",
"0.53529817",
"0.5315428",
"0.53044224",
"0.52752626",
"0.5272475",
"0.5259859",
"0.5214899"
] |
0.904459
|
0
|
Sets the player_b_rating of this Game.
|
def player_b_rating(self, player_b_rating):
self._player_b_rating = player_b_rating
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment",
"def player_a_rating(self, player_a_rating):\n\n self._player_a_rating = player_a_rating",
"def setB(self, b):\n\t\tself.b = int(b)",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass",
"def player_b_games(self, player_b_games):\n\n self._player_b_games = player_b_games",
"def player_b_points(self, player_b_points):\n\n self._player_b_points = player_b_points",
"def percent_b(self, percent_b: float):\n\n self._percent_b = percent_b",
"def player_b_name(self, player_b_name):\n\n self._player_b_name = player_b_name",
"def b(self, b):\n\n self._b = b",
"def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio",
"def update_average_book_rating(self, isbn):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating = total_rating_score / num_ratings WHERE \n ISBN=%s\"\"\", (isbn,))\n self.db.commit()",
"def set_score(self, a, b, score):\n ### FILL IN ###",
"def bid_percentage(self, bid_percentage):\n\n self._bid_percentage = bid_percentage",
"def set_boost(self, boost):\r\n self._boost = float(boost)\r\n return self",
"def update_boy(self, hash, new_rate):\n image = self._db.boys.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.boys.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)",
"def update_g_score(self, value):\n self.g_score = value",
"def user_rating(self, rating: int):\n if type(rating) == int:\n self._user_rating = rating\n if type(rating) != int:\n raise ValueError\n if rating < 0 or rating > 5:\n print('Rating is out of 5. Please enter an integer from 0 to 5.')\n self._user_rating = None",
"def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._rating = rating",
"def set_bid(self, bid):\n self.__bid = bid",
"def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount",
"def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate",
"def __set_bias_score(self, row, best_score):\n if row.productid == best_score:\n result = 1\n else:\n result = row.final_score\n\n return result",
"def player_a_rating_adjustment(self, player_a_rating_adjustment):\n\n self._player_a_rating_adjustment = player_a_rating_adjustment",
"def percent_b(self) -> float:\n return self._percent_b",
"def bairro(self, bairro):\n self._bairro = bairro",
"def node_b(self, node_b):\n\n self._node_b = node_b",
"def bound_rating(self, rating):\n return 1.0 * max(0, min(int(rating + 0.5), 5))\n # return 1.0 * max(0, min(rating, 5))",
"def a_rate(self, a_rate):\n\n self._a_rate = a_rate"
] |
[
"0.7433762",
"0.6573614",
"0.63315046",
"0.60228175",
"0.5984338",
"0.59810424",
"0.5883635",
"0.58609754",
"0.5827438",
"0.5546673",
"0.5501033",
"0.53830224",
"0.530768",
"0.52600473",
"0.5225375",
"0.51287466",
"0.5124128",
"0.5068001",
"0.5044063",
"0.50251865",
"0.5018571",
"0.500026",
"0.49983478",
"0.49834093",
"0.49760944",
"0.49736387",
"0.49651387",
"0.49547803",
"0.49500334",
"0.49416372"
] |
0.91605
|
0
|