query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
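Each row pairs a natural-language query with one positive code document, a metadata block describing the training objective, and 30 mined negatives aligned with their retrieval scores. As a quick orientation, the sketch below iterates rows of this schema with the Hugging Face `datasets` library; the `train.jsonl` path is a hypothetical local export, since the dataset's actual hub ID is not shown here.

# Minimal sketch: iterate rows of this schema with the `datasets` library.
# "train.jsonl" is a hypothetical local export of the table below.
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")
for row in ds:
    # Every row carries exactly 30 mined negatives, score-aligned.
    assert len(row["negatives"]) == len(row["negative_scores"]) == 30
    print(row["query"][:60], "| doc score:", row["document_score"],
          "| rank:", row["document_rank"])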
Process posts and map posted document names to post details in the environment.
|
def process_posts(app, doctree):
env = app.builder.env
if not hasattr(env, "ablog_posts"):
env.ablog_posts = {}
post_nodes = list(doctree.findall(PostNode))
if not post_nodes:
return
post_date_format = app.config["post_date_format"]
should_auto_orphan = app.config["post_auto_orphan"]
docname = env.docname
if should_auto_orphan:
# mark the post as 'orphan' so that
# "document isn't included in any toctree" warning is not issued
# We do not simply assign to should_auto_orphan because if auto-orphan
# is false, we still want to respect the per-post :rst:dir`orphan` setting
app.env.metadata[docname]["orphan"] = True
blog = Blog(app)
auto_excerpt = blog.post_auto_excerpt
multi_post = len(post_nodes) > 1 or blog.post_always_section
for order, node in enumerate(post_nodes, start=1):
if node["excerpt"] is None:
node["excerpt"] = auto_excerpt
if multi_post:
# section title, and first few paragraphs of the section of post
# are used when there are more than 1 posts
section = node
while True:
if isinstance(section, nodes.section):
break
section = node.parent
else:
section = doctree
# get updates here, in the section that post belongs to
# Might there be orphan updates?
update_dates = _get_update_dates(section, docname, post_date_format)
# Making sure that post has a title because all post titles
# are needed when resolving post lists in documents
title = node["title"] or _get_section_title(section)
# creating a summary here, before references are resolved
excerpt = []
if node.children:
if node["exclude"]:
node.replace_self([])
else:
node.replace_self(node.children)
for child in node.children:
excerpt.append(child.deepcopy())
elif node["excerpt"]:
count = 0
for nod in section.findall(nodes.paragraph):
excerpt.append(nod.deepcopy())
count += 1
if count >= (node["excerpt"] or 0):
break
node.replace_self([])
else:
node.replace_self([])
nimg = node["image"] or blog.post_auto_image
if nimg:
for img, nod in enumerate(section.findall(nodes.image), start=1):
if img == nimg:
excerpt.append(nod.deepcopy())
break
date = node["date"]
if date:
try:
date = datetime.strptime(date, post_date_format)
except ValueError:
if date_parser:
try:
date = date_parser(date)
except ValueError:
raise ValueError("invalid post date in: " + docname)
else:
raise ValueError(
f"invalid post date ({date}) in " + docname + f". Expected format: {post_date_format}"
)
else:
date = None
# if docname ends with `index` use folder name to reference the document
# a potential problem here is that there may be files/folders with the
# same name, so issuing a warning when that's the case may be a good idea
folder, label = os.path.split(docname)
if label == "index":
folder, label = os.path.split(folder)
if not label:
label = slugify(title)
section_name = ""
if multi_post and section.parent is not doctree:
section_name = section.attributes["ids"][0]
label += "-" + section_name
else:
# create a reference for the post
# if it is posting the document
# ! this does not work for sections
app.env.domains["std"].data["labels"][label] = (docname, label, title)
app.env.domains["std"].data["anonlabels"][label] = (docname, label)
if section.parent is doctree:
section_copy = section[0].deepcopy()
else:
section_copy = section.deepcopy()
# multiple posting may result having post nodes
for nn in section_copy.findall(PostNode):
if nn["exclude"]:
nn.replace_self([])
else:
nn.replace_self(node.children)
postinfo = {
"docname": docname,
"section": section_name,
"order": order,
"date": date,
"update": max(update_dates + [date]),
"title": title,
"excerpt": excerpt,
"tags": node["tags"],
"author": node["author"],
"category": node["category"],
"location": node["location"],
"language": node["language"],
"redirect": node["redirect"],
"nocomments": node["nocomments"],
"image": node["image"],
"exclude": node["exclude"],
"external_link": node["external_link"],
"doctree": section_copy,
}
if docname not in env.ablog_posts:
env.ablog_posts[docname] = []
env.ablog_posts[docname].append(postinfo)
# instantiate catalogs and collections here
# so that references are created and no warnings are issued
if app.builder.format == "html":
stdlabel = env.domains["std"].data["labels"] # NOQA
else:
if hasattr(env, "intersphinx_inventory"):
stdlabel = env.intersphinx_inventory.setdefault("std:label", {}) # NOQA
baseurl = getattr(env.config, "blog_baseurl").rstrip("/") + "/" # NOQA
project, version = env.config.project, str(env.config.version) # NOQA
for key in ["tags", "author", "category", "location", "language"]:
catalog = blog.catalogs[key]
for label in postinfo[key]:
coll = catalog[label] # NOQA
if postinfo["date"]:
coll = blog.archive[postinfo["date"].year] # NOQA
|
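The date handling in `process_posts` above tries the configured `post_date_format` with `strptime` first and only falls back to `dateutil` when it is importable. A self-contained sketch of that parse-then-fallback pattern follows; the format string and example dates are illustrative, not ablog defaults.

# Sketch of the strptime-first, dateutil-fallback parsing used above.
from datetime import datetime

try:
    from dateutil.parser import parse as date_parser  # optional dependency
except ImportError:
    date_parser = None

def parse_post_date(raw: str, fmt: str = "%d %B %Y") -> datetime:
    try:
        return datetime.strptime(raw, fmt)
    except ValueError:
        if date_parser is None:
            raise ValueError(f"invalid post date: {raw!r}")
        return date_parser(raw)

print(parse_post_date("24 March 2024"))  # matches fmt directly
print(parse_post_date("2024-03-24"))     # rescued by dateutil when installed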
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document",
"def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post",
"def process(self):\n # tokenize, then filter & otherwise process words in each document\n # using steps in preprocess_doc()\n\n all_posts_count = self.postman.posts_read.find({'subreddit': self.postman.subreddit}).count()\n\n for post_idx, post in enumerate(self.postman.posts_read.find({'subreddit': self.postman.subreddit})):\n # preprocess the post and add the new words to the corpus\n new_words = self.preprocess_post(post)\n self.corpus.update(new_words)\n\n # print on every Nth post so you know it's alive\n if post_idx % 100 == 0:\n print 'done post %i out of %i' % (post_idx, all_posts_count)\n\n #TODO:\n print 'word count and other corpus-level filters not implemented, skipping...'\n # corpus-level filtering\n # get rid of invalid documents (based on word count)\n # self.corpus = [doc for doc in self.corpus if self.doc_has_valid_wc(doc)]\n # print 'filtered out %i out of %i documents' % (pre_corpus_len - len(self.corpus), pre_corpus_len)\n # stem or lemmatize\n # if self.stem_or_lemma_callback:\n # self.corpus = [self.perform_stem_or_lem(doc) for doc in self.corpus]\n # for chaining\n #######################################################\n\n return self",
"def parse_posts(self):\n logger.info(\"Parsing posts\")\n\n self.df.title = self.df.title.str.strip()\n\n spam_companies = [\"Indeed Prime\"]\n self.df = self.df[~self.df[\"company\"].isin(spam_companies)]\n self.df = self.df.dropna(subset=[\"company\"])\n self.df = self.df.drop_duplicates(subset=[\"company\", \"date_posted\", \"title\"])",
"def register_posts(app):\n blog = Blog(app)\n for docname, posts in getattr(app.env, \"ablog_posts\", {}).items():\n for postinfo in posts:\n blog.register(docname, postinfo)",
"def analyse_posts(self, event):\n\n # Iterate over each post listed in \"deployed\"\n for post in event[\"deployed\"]:\n title = post.title()\n\n # Don't send for drafts\n if post.is_draft or post.post_status != \"published\":\n self.logger.info(\n \"Skipping Draft Post {0} with status {1}\".format(\n title, post.post_status\n )\n )\n continue\n\n # Extract some details\n link = post.permalink(absolute=True)\n text = post.text()\n self.logger.info(\"Processing {0}\".format(link))\n\n # Calculate and retrieve the state-key for this URL\n key = \"webmention-info-{0}\".format(link)\n observed_links = self.site.state.get(key)\n\n if not observed_links:\n observed_links = []\n\n # Extract links from within the rendered page\n links = self.extract_links(text)\n\n # Set up a requests session so that HTTP keep-alives can be used where possible\n # this reduces connection overhead etc\n session = requests.session()\n\n # Set the user-agent for all requests in this session\n session.headers.update({\"User-Agent\": \"Nikola SSG Webmention plugin\"})\n\n # Send mentions for each\n for dest in links:\n\n # See whether a webmention's already been sent for this page and url\n # means we won't reping links every time a post is updated\n if dest in observed_links:\n continue\n\n sent, has_mentions = self.send_webmention(link, dest, session)\n\n # We want to cache two categories of link\n #\n # Has webmentions, sent successfully\n # Does not have webmentions\n\n if sent or not has_mentions:\n observed_links.append(dest)\n\n # Now that all links have been processed, save the state\n self.site.state.set(key, observed_links)",
"def read_post_to_file(country_name: str, post: dict, data_path: str, order: int):\n\n logging.info(f\"[READ POST] Reading post\")\n\n raw_title = post['title']['rendered']\n raw_content = post['content']['rendered']\n\n cleaned_title = clean_html(raw_title)\n cleaned_content = clean_html(raw_content)\n \n post_title = cleaned_title.translate(str.maketrans(\"\", \"\", string.punctuation))\n file_name = f\"{country_name}{order:03}_{post_title[:50]}.txt\"\n file_path = os.path.join(\n data_path,\n file_name,\n )\n\n post_file = open(\n file_path,\n 'w'\n )\n\n post_file.write(f\"{cleaned_title}\\n\")\n post_file.write(f\"{cleaned_content}\")\n\n return file_name, file_path",
"def process_postlist(app, doctree, docname):\n blog = Blog(app)\n if not blog:\n register_posts(app)\n for node in doctree.findall(PostList):\n colls = []\n for cat in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n for coll in node[cat]:\n if coll in blog.catalogs[cat].collections:\n colls.append(blog.catalogs[cat].collections[coll])\n if colls:\n posts = set(blog.posts)\n for coll in colls:\n posts = posts & set(coll)\n posts = list(posts)\n posts.sort(reverse=True)\n posts = posts[: node.attributes[\"length\"]]\n else:\n posts = list(blog.recent(node.attributes[\"length\"], docname, **node.attributes))\n if node.attributes[\"sort\"]:\n posts.sort() # in reverse chronological order, so no reverse=True\n fmts = list(Formatter().parse(node.attributes[\"format\"]))\n not_in = {\"date\", \"title\", \"author\", \"location\", \"language\", \"category\", \"tags\", None}\n for text, key, __, __ in fmts:\n if key not in not_in:\n raise KeyError(f\"{key} is not recognized in postlist format\")\n excerpts = node.attributes[\"excerpts\"]\n expand = node.attributes[\"expand\"]\n date_format = node.attributes[\"date\"] or _(blog.post_date_format_short)\n bl = nodes.bullet_list()\n bl.attributes[\"classes\"].append(\"postlist-style-\" + node[\"list-style\"])\n bl.attributes[\"classes\"].append(\"postlist\")\n for post in posts:\n bli = nodes.list_item()\n bli.attributes[\"classes\"].append(\"ablog-post\")\n bl.append(bli)\n par = nodes.paragraph()\n bli.append(par)\n for text, key, __, __ in fmts:\n if text:\n par.append(nodes.Text(text))\n if key is None:\n continue\n if key == \"date\":\n par.append(nodes.Text(post.date.strftime(date_format)))\n else:\n if key == \"title\":\n items = [post]\n else:\n items = getattr(post, key)\n\n for i, item in enumerate(items, start=1):\n if key == \"title\":\n ref = nodes.reference()\n if item.options.get(\"external_link\"):\n ref[\"refuri\"] = post.options.get(\"external_link\")\n else:\n ref[\"refuri\"] = app.builder.get_relative_uri(docname, item.docname)\n ref[\"internal\"] = True\n ref[\"ids\"] = []\n ref[\"backrefs\"] = []\n ref[\"dupnames\"] = []\n ref[\"classes\"] = []\n ref[\"names\"] = []\n ref.append(nodes.Text(str(item)))\n par.attributes[\"classes\"].append(\"ablog-post-title\")\n else:\n ref = _missing_reference(app, item.xref, docname)\n par.append(ref)\n if i < len(items):\n par.append(nodes.Text(\", \"))\n if excerpts and post.excerpt:\n for enode in post.excerpt:\n enode = enode.deepcopy()\n enode.attributes[\"classes\"].append(\"ablog-post-excerpt\")\n revise_pending_xrefs(enode, docname)\n app.env.resolve_references(enode, docname, app.builder)\n enode.parent = bli.parent\n bli.append(enode)\n if expand:\n ref = app.builder.get_relative_uri(docname, post.docname)\n enode = nodes.paragraph()\n enode.attributes[\"classes\"].append(\"ablog-post-expand\")\n refnode = nodes.reference(\"\", \"\", internal=True, refuri=ref)\n innernode = nodes.emphasis(text=expand)\n refnode.append(innernode)\n enode.append(refnode)\n bli.append(enode)\n node.replace_self(bl)",
"def save_posts(self):\n logger.info(\"Savings posts to database\")\n records = self.df.to_dict(\"records\")\n\n for record in records:\n Company.objects.get_or_create(name=record[\"company\"])\n\n Post.objects.get_or_create(\n title=record[\"title\"],\n company_id=record[\"company\"],\n defaults={\n \"date_posted\": record[\"date_posted\"],\n \"description\": record[\"description\"],\n \"location\": record[\"location\"],\n \"is_sponsored\": False,\n \"date_added_db\": record[\"date_added_db\"],\n \"source_id\": record[\"source\"],\n \"link\": record[\"link\"],\n },\n )",
"def refactor_post(self,post_name):\n for name in list(self.rules):\n related_post = \"{}.post.{}\".format(name,post_name)\n if related_post in self.rules:\n parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]\n self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])",
"def update_post(prev_data, data, db_conn):\n\n schema = get_post_schema(data)\n post_kind = prev_data['kind']\n if post_kind is 'post' or post_kind is 'proposal':\n data = pick(data, ('body',))\n elif post_kind is 'vote':\n data = pick(data, ('body', 'response',))\n data, errors = update_document(schema, prev_data, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors",
"def deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)",
"def process_posts_and_pages(*, posts, pages, settings):\n for post in posts:\n post.test_attr = 'post'\n for page in pages:\n page.test_attr = 'page'\n return {'posts': posts, 'pages': pages}",
"def insert_post(data, db_conn):\n\n schema = get_post_schema(data)\n data, errors = insert_document(schema, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors",
"def extract_posts(posts_file, output_filename=direc+\"/posts.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting posts from \" + posts_file + \"...\")\r\n posts_dict = {}\r\n with open(output_filename, 'w', encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(posts_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n line = \"\"\r\n if child.attrib['PostTypeId'] == '1' and 'AcceptedAnswerId' in child.attrib:\r\n posts_dict[child.attrib['Id']] = {'accepted': child.attrib['AcceptedAnswerId'], 'other': []}\r\n clean_title = clean_markdown(child.attrib['Title'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + clean_title + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n elif child.attrib['PostTypeId'] == '2':\r\n if child.attrib['ParentId'] in posts_dict and not child.attrib['Id'] == posts_dict[child.attrib['ParentId']]['accepted']:\r\n posts_dict[child.attrib['ParentId']]['other'].append(child.attrib['Id'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['ParentId'] + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n f.write(line)\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting posts from \" + output_filename + \".\\n\")\r\n return posts_dict",
"def cleanup_post(post):\n \n post_data = post\n post_data[\"id\"] = str(post[\"_id\"])\n post_data[\"author\"] = str(post[\"author\"])\n post_data[\"created\"] = str(post[\"created\"].ctime())\n del post_data[\"_id\"]\n \n if \"reply_to\" in post:\n post_data[\"reply_to\"] = str(post[\"reply_to\"])\n\n if \"repost_of\" in post:\n post_data[\"repost_of\"] = str(post[\"repost_of\"])\n\n return post_data",
"def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings",
"def update_post_format(post):\n\n post_dict = {\n \"title\": post[1],\n \"genre\": get_genre(post[0]),\n \"content\": post[2],\n \"repeater_link\": get_links(post[3], post[4]),\n }\n \n return post_dict",
"def post_to_activity(self, post):\n id = None\n if post.get('id'):\n # strip USERID_ prefix if it's there\n post['id'] = post['id'].split('_', 1)[-1]\n id = post['id']\n\n obj = self.post_to_object(post)\n activity = {\n 'verb': VERBS.get(post.get('type', obj.get('objectType')), 'post'),\n 'published': obj.get('published'),\n 'updated': obj.get('updated'),\n 'id': self.tag_uri(id) if id else None,\n 'url': self.post_url(post),\n 'actor': obj.get('author'),\n 'object': obj,\n }\n\n application = post.get('application')\n if application:\n activity['generator'] = {\n 'displayName': application.get('name'),\n 'id': self.tag_uri(application.get('id')),\n }\n return self.postprocess_activity(activity)",
"def write_postings(docname, postings, dbcon):\n cur = dbcon.cursor()\n for word, posting in postings.items():\n # generate text of indexes\n indexes = \"\"\n for ix in posting[\"indexes\"]:\n indexes += \"{},\".format(ix)\n indexes = indexes.rstrip(\",\")\n # insert into database; nested try is needed to handle rollback\n # and commit properly\n try:\n try:\n cur.execute(\"INSERT INTO IndexWord VALUES (?)\", (word,))\n except sqlite3.IntegrityError: # word already in index\n pass\n cur.execute(\n \"INSERT INTO Posting VALUES (?, ?, ?, ?)\",\n (word, docname, posting[\"frequency\"], indexes)\n )\n except Exception as e:\n print(e)\n dbcon.rollback()\n else:\n dbcon.commit()",
"def parse_post_metadata(post_text):\n result = {}\n \n header_end = 0\n \n promed_date_match = re.search(\n r\"Published Date:\\s(?P<date>.*)\", post_text)\n result[\"promedDate\"] = parse_promed_pub_datetime(\n promed_date_match.group(\"date\"))\n \n archive_match = re.search(r\"Archive Number: (?P<num>.*)\", post_text)\n result[\"archiveNumber\"] = archive_match.group(\"num\")\n header_end = archive_match.end()\n \n subject = re.search(r\"Subject:\\s(?P<subject>.*)\", post_text).group(\"subject\")\n result[\"subject\"] = parse_subject_line(subject)\n result[\"subject\"][\"raw\"] = subject\n \n # This will not find all linked reports.\n # Some older posts refrence posts using different indexes I do not know\n # how to interpret.\n # Example: http://promedmail.org/direct.php?id=2194235\n result[\"linkedReports\"] = [\n report_id for report_id in re.findall(r\"\\d{8}\\.\\d+\", post_text)]\n \n # Most links will be article source urls or links to promed.\n result[\"links\"] = list(set(\n re.findall(r\"http\\S+[^(\\.\\])(\\.\\)>\\s]\", post_text)))\n result[\"links\"].sort()\n \n communicated_match = re.search(communicated_by_regex, post_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n return result, header_end",
"def parse_posts(posts_dict):\n return posts_dict['posts']",
"def parse_post(post: Dict[str, Any],\n image_retriever: str = \"pretrained\",\n image_basedir: Optional[str] = \"documentIntent_emnlp19/resnet18_feat\") -> Dict[str, Any]:\n id = post['id']\n label_intent = post['intent']\n label_semiotic = post['semiotic']\n label_contextual = post['contextual']\n caption = post['caption']\n\n if image_retriever == \"url\":\n image = post['url']\n raise NotImplementedError(\"Currently cannot download an image from {}\".format(image))\n elif image_retriever == \"pretrained\":\n image_path = Path(image_basedir) / \"{}.npy\".format(id)\n image = np.load(image_path)\n elif image_retriever == \"ignored\":\n image = None\n else:\n raise NotImplementedError(\"image_retriever method doesn't exist\")\n\n output_dict = {\n 'id': id,\n 'label': {\n 'intent': label_intent,\n 'semiotic': label_semiotic,\n 'contextual': label_contextual,\n },\n 'caption': caption,\n 'image': image,\n }\n\n return output_dict",
"def Translate(self, infile, outfile, mapfile):\r\n # Create the top-level feed object\r\n feed = []\r\n comments = []\r\n\r\n # Calculate the last updated time by inspecting all of the posts\r\n last_updated = 0\r\n\r\n # These three variables keep the state as we parse the file\r\n post_entry = {} # The current post atom.Entry to populate\r\n comment_entry = {} # The current comment atom.Entry to populate\r\n last_entry = None # The previous post atom.Entry if exists\r\n tag_name = None # The current name of multi-line values\r\n tag_contents = '' # The contents of multi-line values\r\n\r\n # Loop through the text lines looking for key/value pairs\r\n split_re = re.compile('^[A-Z ]+:')\r\n for line in infile:\r\n\r\n # Remove whitespace\r\n line = line.strip()\r\n\r\n # Check for the post ending token\r\n if line == '-' * 8 and tag_name != 'BODY':\r\n if post_entry:\r\n # Add the post to our feed\r\n sys.stderr.write(\"Adding post %s\\n\" % post_entry['title'])\r\n self.add_to_user_map(post_entry.get('author'), post_entry.get('authorEmail'))\r\n feed.insert(0, post_entry)\r\n last_entry = post_entry\r\n\r\n # Reset the state variables\r\n post_entry = {}\r\n comment_entry = {}\r\n tag_name = None\r\n tag_contents = ''\r\n continue\r\n\r\n # Check for the tag ending separator\r\n elif line == '-' * 5:\r\n # Get the contents of the body and set the entry contents\r\n if tag_name == 'BODY':\r\n post_entry['description'] = self._Encode(tag_contents)\r\n\r\n # This is the start of the COMMENT section. Create a new entry for\r\n # the comment and add a link to the original post.\r\n elif tag_name == 'COMMENT':\r\n comment_entry['body'] = self._Encode(tag_contents)\r\n post_entry.setdefault('comments', []).append(comment_entry)\r\n self.add_to_user_map(comment_entry.get('author'), comment_entry.get('authorEmail'))\r\n comment_entry = {}\r\n\r\n # Get the contents of the extended body\r\n elif tag_name == 'EXTENDED BODY':\r\n if post_entry:\r\n post_entry['mt_text_more'] = self._Encode(tag_contents)\r\n elif last_entry:\r\n last_entry['mt_text_more'] = self._Encode(tag_contents)\r\n\r\n # Convert any keywords (comma separated values) into Blogger labels\r\n elif tag_name == 'KEYWORDS':\r\n post_entry['mt_keywords'] = tag_contents\r\n\r\n # Reset the current tag and its contents\r\n tag_name = None\r\n tag_contents = ''\r\n continue\r\n\r\n # Split the line into key/value pairs\r\n key = line\r\n value = ''\r\n if split_re.match(line):\r\n elems = line.split(':')\r\n key = elems[0]\r\n if len(elems) > 1:\r\n value = ':'.join(elems[1:]).strip()\r\n\r\n # The author key indicates the start of a post as well as the author of\r\n # the post entry or comment\r\n if key == 'AUTHOR':\r\n # Create a new entry \r\n entry = {}\r\n\r\n # Add the author's name\r\n author_name = self._Encode(value)\r\n if not author_name:\r\n author_name = 'Anonymous'\r\n entry['author'] = author_name\r\n\r\n # Add the appropriate kind, either a post or a comment\r\n if tag_name == 'COMMENT':\r\n entry['postid'] = post_entry['postid']\r\n comment_entry = entry\r\n else:\r\n entry['postid'] = 'post-' + self._GetNextId()\r\n post_entry = entry\r\n\r\n # The title only applies to new posts\r\n elif key == 'TITLE' and tag_name != 'PING':\r\n post_entry['title'] = self._Encode(value)\r\n\r\n # If the status is a draft, mark it as so in the entry. 
If the status\r\n # is 'Published' there's nothing to do here\r\n elif key == 'STATUS':\r\n post_entry['status'] = value\r\n\r\n # Turn categories into labels\r\n elif key == 'CATEGORY':\r\n post_entry.setdefault('category', []).append(value)\r\n\r\n # Convert the date and specify it as the published/updated time\r\n elif key == 'DATE' and tag_name != 'PING':\r\n entry = post_entry\r\n if tag_name == 'COMMENT':\r\n entry = comment_entry\r\n entry['dateCreated'] = value\r\n\r\n # Check to see if this was the last post published (so far)\r\n # seconds = time.mktime(time_val)\r\n # last_updated = max(seconds, last_updated)\r\n\r\n # Convert all tags into Blogger labels\r\n elif key == 'TAGS':\r\n post_entry.setdefault('tags', []).append(value)\r\n\r\n # Update the author's email if it is present and not empty\r\n elif tag_name == 'COMMENT' and key == 'EMAIL':\r\n comment_entry['authorEmail'] = value\r\n\r\n # Update the author's URI if it is present and not empty\r\n elif tag_name == 'COMMENT' and key == 'URL':\r\n comment_entry['authorUrl'] = value\r\n\r\n # If any of these keys are used, they contain information beyond this key\r\n # on following lines\r\n elif key in ('COMMENT', 'BODY', 'EXTENDED BODY', 'EXCERPT', 'KEYWORDS', 'PING'):\r\n tag_name = key\r\n\r\n # These lines can be safely ignored\r\n elif key in ('BASENAME', 'ALLOW COMMENTS', 'CONVERT BREAKS', \r\n 'ALLOW PINGS', 'PRIMARY CATEGORY', 'IP', 'URL', 'EMAIL'):\r\n continue\r\n\r\n # If the line is empty and we're processing the body, add a line break\r\n elif (tag_name == 'BODY' or tag_name == 'EXTENDED BODY' or tag_name == 'COMMENT') and len(line) == 0:\r\n tag_contents += '\\n'\r\n\r\n # This would be a line of content beyond a key/value pair\r\n elif len(key) != 0:\r\n tag_contents += line + '\\n'\r\n\r\n\r\n # Update the feed with the last updated time\r\n # feed.updated = atom.Updated(self._ToBlogTime(time.gmtime(last_updated)))\r\n\r\n # Serialize the feed object\r\n yaml.dump(feed, outfile, Dumper=yaml.CDumper)\r\n \r\n # Write out the user map\r\n user_map_dict = {}\r\n for name, email in self.user_map:\r\n user_map_dict[name] = email\r\n yaml.dump(user_map_dict, mapfile, Dumper=yaml.CDumper)",
"def PostData(self, postdata):\n if len(postdata) > 0:\n for key in postdata:\n if postdata[key] == \"%TARGET%\":\n postdata[key] = self._target\n self._postdata = postdata\n else:\n self._postdata = None",
"def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post",
"def wp2fields(xml, wp_custpost=False):\r\n\r\n items = get_items(xml)\r\n for item in items:\r\n\r\n if item.find('status').string == \"publish\":\r\n\r\n try:\r\n # Use HTMLParser due to issues with BeautifulSoup 3\r\n title = HTMLParser().unescape(item.title.contents[0])\r\n except IndexError:\r\n title = 'No title [%s]' % item.find('post_name').string\r\n logger.warning('Post \"%s\" is lacking a proper title' % title)\r\n\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n filename = get_filename(filename, post_id)\r\n\r\n content = item.find('encoded').string\r\n raw_date = item.find('post_date').string\r\n date_object = time.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\r\n date = time.strftime(\"%Y-%m-%d %H:%M\", date_object)\r\n author = item.find('creator').string\r\n\r\n categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]\r\n # caturl = [cat['nicename'] for cat in item.find(domain='category')]\r\n\r\n tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]\r\n\r\n kind = 'article'\r\n post_type = item.find('post_type').string\r\n if post_type == 'page':\r\n kind = 'page'\r\n elif wp_custpost:\r\n if post_type == 'post':\r\n pass\r\n # Old behaviour was to name everything not a page as an article.\r\n # Theoretically all attachments have status == inherit so\r\n # no attachments should be here. But this statement is to\r\n # maintain existing behaviour in case that doesn't hold true.\r\n elif post_type == 'attachment':\r\n pass\r\n else:\r\n kind = post_type\r\n yield (title, content, filename, date, author, categories, tags,\r\n kind, \"wp-html\")",
"def post():\n pass",
"def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)",
"def process_post_data_username(post):\n if not post.get('username', None):\n return post\n\n post_data = post.copy()\n post_data['username'] = phone_number_to_international(post_data['username'])\n return post_data"
] |
[
"0.6553351",
"0.6254215",
"0.61910105",
"0.5871757",
"0.58004254",
"0.5777974",
"0.5763263",
"0.57372594",
"0.5677387",
"0.56719476",
"0.56647515",
"0.5660065",
"0.5658226",
"0.56144637",
"0.5554312",
"0.55404764",
"0.5533678",
"0.55197513",
"0.55081964",
"0.54926974",
"0.5477193",
"0.54480267",
"0.5438722",
"0.54168385",
"0.539712",
"0.5392158",
"0.5343364",
"0.53398687",
"0.5308359",
"0.53016156"
] |
0.6616748
|
0
|
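The metadata above declares a (query, document, negatives) triplet objective. Below is a minimal sketch of how one such row is typically consumed, assuming a PyTorch setup and a placeholder `encode` function (no encoder ships with this data): the positive document competes against its 30 negatives under a softmax cross-entropy (InfoNCE-style) loss. The `negative_scores` and `document_score` columns could additionally be used to filter out negatives that score nearly as high as the positive.

# InfoNCE-style loss over one (query, document, negatives) row.
# `encode` is a stand-in for whatever text encoder is being trained.
import torch
import torch.nn.functional as F

def encode(texts):
    # Placeholder: a real setup would embed `texts` with a trained model.
    return F.normalize(torch.randn(len(texts), 128), dim=-1)

def row_loss(row):
    q = encode([row["query"]])                            # (1, d)
    cands = encode([row["document"]] + row["negatives"])  # (1 + n, d), positive first
    logits = q @ cands.T                                  # (1, 1 + n) cosine similarities
    target = torch.zeros(1, dtype=torch.long)             # index 0 is the positive
    return F.cross_entropy(logits, target)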
Replace `PostList` nodes with lists of posts. Also, register all posts if they have not been registered yet.
|
def process_postlist(app, doctree, docname):
blog = Blog(app)
if not blog:
register_posts(app)
for node in doctree.findall(PostList):
colls = []
for cat in ["tags", "author", "category", "location", "language"]:
for coll in node[cat]:
if coll in blog.catalogs[cat].collections:
colls.append(blog.catalogs[cat].collections[coll])
if colls:
posts = set(blog.posts)
for coll in colls:
posts = posts & set(coll)
posts = list(posts)
posts.sort(reverse=True)
posts = posts[: node.attributes["length"]]
else:
posts = list(blog.recent(node.attributes["length"], docname, **node.attributes))
if node.attributes["sort"]:
posts.sort() # in reverse chronological order, so no reverse=True
fmts = list(Formatter().parse(node.attributes["format"]))
not_in = {"date", "title", "author", "location", "language", "category", "tags", None}
for text, key, __, __ in fmts:
if key not in not_in:
raise KeyError(f"{key} is not recognized in postlist format")
excerpts = node.attributes["excerpts"]
expand = node.attributes["expand"]
date_format = node.attributes["date"] or _(blog.post_date_format_short)
bl = nodes.bullet_list()
bl.attributes["classes"].append("postlist-style-" + node["list-style"])
bl.attributes["classes"].append("postlist")
for post in posts:
bli = nodes.list_item()
bli.attributes["classes"].append("ablog-post")
bl.append(bli)
par = nodes.paragraph()
bli.append(par)
for text, key, __, __ in fmts:
if text:
par.append(nodes.Text(text))
if key is None:
continue
if key == "date":
par.append(nodes.Text(post.date.strftime(date_format)))
else:
if key == "title":
items = [post]
else:
items = getattr(post, key)
for i, item in enumerate(items, start=1):
if key == "title":
ref = nodes.reference()
if item.options.get("external_link"):
ref["refuri"] = post.options.get("external_link")
else:
ref["refuri"] = app.builder.get_relative_uri(docname, item.docname)
ref["internal"] = True
ref["ids"] = []
ref["backrefs"] = []
ref["dupnames"] = []
ref["classes"] = []
ref["names"] = []
ref.append(nodes.Text(str(item)))
par.attributes["classes"].append("ablog-post-title")
else:
ref = _missing_reference(app, item.xref, docname)
par.append(ref)
if i < len(items):
par.append(nodes.Text(", "))
if excerpts and post.excerpt:
for enode in post.excerpt:
enode = enode.deepcopy()
enode.attributes["classes"].append("ablog-post-excerpt")
revise_pending_xrefs(enode, docname)
app.env.resolve_references(enode, docname, app.builder)
enode.parent = bli.parent
bli.append(enode)
if expand:
ref = app.builder.get_relative_uri(docname, post.docname)
enode = nodes.paragraph()
enode.attributes["classes"].append("ablog-post-expand")
refnode = nodes.reference("", "", internal=True, refuri=ref)
innernode = nodes.emphasis(text=expand)
refnode.append(innernode)
enode.append(refnode)
bli.append(enode)
node.replace_self(bl)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)",
"def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def add(self, posts):\n li_html = []\n for post in posts:\n li_html.append(\n u'<li><a href=\"{route}\">{title}</a></li>'.format(\n route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts",
"def marshal_posts(shard, post_list):\n out = []\n for post in post_list:\n post_dict = dict(\n shardId=shard,\n archiveType=models.Post.ARCHIVE_REVERSE_MAPPING[post.archive_type],\n nickname=post.nickname,\n title=post.title,\n body=post.body,\n postTimeMs=models.datetime_to_stamp_ms(post.post_time),\n sequenceId=getattr(post, 'sequence', None),\n newTopicId=post.new_topic,\n postId=post.post_id)\n out.append(post_dict)\n return out",
"def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list",
"def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! 
this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA",
"def register_posts(app):\n blog = Blog(app)\n for docname, posts in getattr(app.env, \"ablog_posts\", {}).items():\n for postinfo in posts:\n blog.register(docname, postinfo)",
"def make_list(posts, dst, list_layout, item_layout, limit=None, **params):\n items = []\n for k, post in enumerate(posts):\n item_params = dict(params, **post)\n\n # Get title and summary\n title, summary = get_title_and_summary(item_params['dest_path'])\n item_params['title'] = title\n item_params['summary'] = summary\n\n item = render(item_layout, **item_params)\n items.append(item)\n\n # Limit to `limit` items\n if limit is not None and k + 1 >= limit:\n break\n\n params['content'] = ''.join(items)\n dst_path = render(dst, **params)\n output = render(list_layout, **params)\n\n log('Rendering list => {} ...', dst_path)\n fwrite(dst_path, output)",
"def add_posts(self, posts, update=True):\n\n if not isinstance(posts, Iterable):\n posts = [posts,]\n \n subreddit_names = set([post.subreddit for post in posts])\n\n for subreddit_name in subreddit_names:\n subreddit = self.add_subreddit(name=subreddit_name, raise_existing=False)\n subreddit_posts = [post for post in posts if post.subreddit == subreddit_name]\n\n for post in subreddit_posts:\n existing = self.session.query(Post).filter(Post.post_id == post.id).first()\n\n if existing is None:\n entry = self._post_model_to_entry(post)\n entry.subreddit = subreddit\n self.session.add(entry)\n else:\n existing = self._update_post_entry(existing, post)",
"def save_posts(self, posts):\n return self.collection.insert_many(map(lambda post: post.serialize(), posts))",
"def test_update_push_list_of_list(self):\n\n class BlogPost(Document):\n slug = StringField()\n tags = ListField()\n\n BlogPost.drop_collection()\n\n post = BlogPost(slug=\"test\").save()\n\n BlogPost.objects.filter(slug=\"test\").update(push__tags=[\"value1\", 123])\n post.reload()\n assert post.tags == [[\"value1\", 123]]",
"def _update_post_node(node, options, arguments):\n node[\"date\"] = arguments[0] if arguments else None\n node[\"tags\"] = options.get(\"tags\", [])\n node[\"author\"] = options.get(\"author\", [])\n node[\"category\"] = options.get(\"category\", [])\n node[\"location\"] = options.get(\"location\", [])\n node[\"language\"] = options.get(\"language\", [])\n node[\"redirect\"] = options.get(\"redirect\", [])\n node[\"title\"] = options.get(\"title\", None)\n node[\"image\"] = options.get(\"image\", None)\n node[\"excerpt\"] = options.get(\"excerpt\", None)\n node[\"exclude\"] = \"exclude\" in options\n node[\"nocomments\"] = \"nocomments\" in options\n node[\"external_link\"] = options.get(\"external_link\", [])\n return node",
"def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))",
"def update_posts(accounts):\n # print(account.columns)\n for index, post in accounts.iterrows():\n\n # If a post with this URL already exists in database, then continue with next one\n if collection.count_documents({'Posts.URL': post['URL']}, limit=1) != 0:\n print('Post with url ', post['URL'], ' already exists')\n continue\n # Get tags from all posts\n # hashtags = []\n try:\n hashtags = list({tag.strip(\"#\") for tag in post['Description'].split() if tag.startswith(\"#\")})\n except:\n hashtags = []\n # get preprocessed description\n description_without_hashtags, description_preprocessed = preprocess_description(str(post['Description']))\n # update collection with posts\n collection.update_one(\n {\n 'Codename': post['User Name']\n },\n {\n '$push': {\n 'Posts': {'Followers at Posting': post['Followers at Posting'],\n 'Post Created': post['Post Created'],\n 'Post Created Date': post['Post Created Date'],\n 'Post Created Time': post['Post Created Time'],\n 'Type': post['Type'],\n 'Total Interactions': post['Total Interactions'],\n 'Likes': post['Likes'],\n 'Comments': post['Comments'],\n 'Views': post['Views'],\n 'URL': post['URL'],\n 'Link': post['Link'],\n 'Photo': post['Photo'],\n 'Title': post['Title'], # not\n 'Description': post['Description'],\n 'description_without_hashtags': description_without_hashtags,\n 'description_preprocessed': description_preprocessed,\n 'Hashtags': hashtags,\n 'Image Text': post['Image Text'],\n 'Sponsor Id': post['Sponsor Id'],\n 'Sponsor Name': post['Sponsor Name'],\n 'Overperforming Score': post['Overperforming Score (weighted — Likes 1x Comments 1x )']\n }\n }\n }\n )",
"def map_postorder(lamark_ast, visit_func):\n new_children = []\n for node in lamark_ast.get_children():\n new_child = map_postorder(node, visit_func)\n if new_child is not None:\n new_children.append(new_child)\n lamark_ast.set_children(new_children)\n return visit_func(lamark_ast)",
"def lister(list_value):\n\n for item in list_value:\n if isinstance(item, basestring):\n index = list_value.index(item)\n rep, fact = replacer(replace_value=item)\n if fact is True:\n list_value[index] = rep\n\n elif isinstance(item, list):\n lister(list_value=item)\n\n elif isinstance(item, dict):\n _super_munger(mungie=item)",
"def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )",
"def post_process(data):\n for record in data[\"Records\"]:\n for name, value in record.items():\n if type(value) == list:\n newlist = []\n for entry in value:\n newlist.append(post_process_pair(name, entry))\n record[name] = newlist\n else:\n record[name] = post_process_pair(name, value)",
"def post_order_list(root, lst):\n if None is root:\n return\n post_order_list(root.get_left(), lst)\n post_order_list(root.get_right(), lst)\n lst.append(root)",
"def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)",
"def post_process_post(self, post):\r\n post.article = self.rewrite_ob_urls(post.article)\r\n post._commit()\r\n \r\n comments = Comment._query(Comment.c.link_id == post._id, data = True)\r\n for comment in comments:\r\n comment.body = self.rewrite_ob_urls(comment.body)\r\n comment._commit()",
"def postorderUtil(self, root):\n if root:\n self.postorderUtil(root.left)\n self.postorderUtil(root.right)\n self.postlist.append(root.key)\n return self.postlist",
"def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n undated.append(post)\n\n posts = sorted(dated, key=lambda x: x.create_date)\n posts.reverse()\n posts.extend(undated)\n\n if length_limit:\n length_limit = length_limit\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n post_data = {}\n post_data[\"post_image\"] = post.post_image\n stripped_content, images = tagStripper(\n post.content, length_limit=length_limit)\n post_data[\"content\"] = stripped_content\n post_data[\"content_images\"] = images\n post_data[\"url\"] = post.url\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else\\\n post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n if highlight:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if not influencer:\n post_data['user'] = post.influencer.feed_stamp\n if post.products_json:\n post_data[\"products\"] = post.get_product_json()\n else:\n post_data[\"products\"] = []\n posts_data.append(post_data)\n return posts_data",
"def post_list(request):\n\timage_post_list = list(ImagePost.objects.all())\n\tvideo_post_list = list(VideoPost.objects.all())\n\tall_post = image_post_list + video_post_list\n\treturn render(request,'devblog/post_list.html', {'posts':all_post})",
"def post_list(request):\n if request.method == 'GET':\n posts = Post.objects.all()\n serializer = PostSerializer(posts, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = PostSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)",
"def render_posts(self, **params):\n\n if \"user_posts\" in params:\n posts = params['user_posts']\n else:\n posts = Post.get_all()\n\n rendered_posts = \"\"\n for post in posts:\n rendered_posts += self.render_post(post, **params)\n\n self.render(\"blog/blog.html\", rendered_posts=rendered_posts)",
"def postorder_DFT(tree, nodelist):\n if tree.lnode is not None:\n postorder_DFT(tree.lnode, nodelist)\n if tree.rnode is not None:\n postorder_DFT(tree.rnode, nodelist)\n nodelist.append(tree)\n return nodelist",
"def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()",
"def process_tag_list(self, taglist):\r\n self.do_before()\r\n for tag in taglist:\r\n self.feed(tag)\r\n self.do_after()",
"def update_db_from_rss():\n today = date.today()\n url = 'https://newyork.craigslist.org/search/jjj?query=unpaid&sort=rel&format=rss'\n\n cached_feed = CachedFeed.query.filter_by(rss_url=url, date=today).first()\n if not cached_feed:\n resp = requests.get(url)\n cached_feed = CachedFeed(rss_url=url, text=resp.text)\n db.session.add(cached_feed)\n db.session.commit()\n\n feed = feedparser.parse(cached_feed.text)\n\n for entry in feed.entries:\n link = entry['link']\n\n # Skip postings that already exist when scanning\n posting = Posting.query.filter_by(url=link, rss_url=url).first()\n if posting:\n continue\n\n posting_resp = requests.get(link)\n posting_soup = BeautifulSoup(posting_resp.text)\n\n replylink = posting_soup.find(id=\"replylink\")\n contact_href = replylink.get('href') if replylink else None\n\n contact_url = urljoin(url, contact_href)\n contact_resp = requests.get(contact_url)\n contact_soup = BeautifulSoup(contact_resp.text)\n\n anonemail_el = contact_soup.find(class_=\"anonemail\")\n title = posting_soup.find('title').text\n\n posting = Posting(title=title,\n url=link,\n rss_url=url,\n text=unicode(posting_soup.find(id='postingbody')),\n region='nyc',\n posted_at = datetime.fromtimestamp(mktime(entry.published_parsed)),\n email=anonemail_el.text if anonemail_el else None,\n email_subject=title,\n email_body=current_app.config['EMAIL_DEFAULT_BODY']\n )\n\n db.session.add(posting)\n\n print(u\"finished {}, sleeping\".format(link))\n time.sleep(15)\n\n db.session.commit()"
] |
[
"0.61877924",
"0.5988142",
"0.5968003",
"0.5947699",
"0.5835054",
"0.5783334",
"0.57208127",
"0.55403996",
"0.5534692",
"0.5378993",
"0.52760565",
"0.52615005",
"0.5249777",
"0.5159502",
"0.5143264",
"0.5132592",
"0.5100237",
"0.5050234",
"0.5036484",
"0.5034668",
"0.5029585",
"0.5015479",
"0.5004067",
"0.49988735",
"0.4997636",
"0.49852186",
"0.49441525",
"0.49268886",
"0.48809135",
"0.4839731"
] |
0.67510056
|
0
|
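When a postlist names several collections, `process_postlist` above keeps only the posts present in all of them by intersecting sets before sorting and truncating. A self-contained illustration of that filtering pattern follows; the post and collection names are made up.

# Illustration of the collection-intersection filter used above:
# a post survives only if it appears in every requested collection.
tag_python = {"post-a", "post-b", "post-c"}
author_jane = {"post-b", "post-c", "post-d"}
category_howto = {"post-c", "post-e"}

posts = set(tag_python)
for coll in (author_jane, category_howto):
    posts &= coll

print(sorted(posts))  # ['post-c'] -- the only post matching every filter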
Generate archive pages for all posts, categories, tags, authors, and drafts.
|
def generate_archive_pages(app):
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    for post in blog.posts:
        for redirect in post.redirect:
            yield (redirect, {"redirect": post.docname, "post": post}, "ablog/redirect.html")
    found_docs = app.env.found_docs
    atom_feed = bool(blog.blog_baseurl)
    feed_archives = blog.blog_feed_archives
    blog_path = blog.blog_path
    for title, header, catalog in [
        (_("Authors"), _("Posts by"), blog.author),
        (_("Locations"), _("Posts from"), blog.location),
        (_("Languages"), _("Posts in"), blog.language),
        (_("Categories"), _("Posts in"), blog.category),
        (_("All posts"), _("Posted in"), blog.archive),
        (_("Tags"), _("Posts tagged"), blog.tags),
    ]:
        if not catalog:
            continue
        context = {"parents": [], "title": title, "header": header, "catalog": catalog, "summary": True}
        if catalog.docname not in found_docs:
            yield (catalog.docname, context, "ablog/catalog.html")
        for collection in catalog:
            if not collection:
                continue
            context = {
                "parents": [],
                "title": f"{header} {collection}",
                "header": header,
                "collection": collection,
                "summary": True,
                "feed_path": collection.path if feed_archives else blog_path,
                "archive_feed": atom_feed and feed_archives,
            }
            context["feed_title"] = context["title"]
            if collection.docname not in found_docs:
                yield (collection.docname, context, "ablog/collection.html")
    context = {
        "parents": [],
        "title": _("All Posts"),
        "header": _("All"),
        "collection": blog.posts,
        "summary": True,
        "atom_feed": atom_feed,
        "feed_path": blog.blog_path,
    }
    docname = blog.posts.docname
    yield (docname, context, "ablog/collection.html")
    context = {"parents": [], "title": _("Drafts"), "collection": blog.drafts, "summary": True}
    yield (blog.drafts.docname, context, "ablog/collection.html")
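
A minimal wiring sketch (an assumption based on how ABlog-style extensions typically hook into Sphinx, not shown in the snippet above): the generator is connected to Sphinx's html-collect-pages event, whose handlers must yield (pagename, context, templatename) tuples, which is exactly what generate_archive_pages produces.

def setup(app):
    # html-collect-pages handlers yield (pagename, context, templatename)
    # tuples that the HTML builder renders as extra pages.
    app.connect("html-collect-pages", generate_archive_pages)
    return {"parallel_read_safe": True}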
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_archives():\n if not session.get('logged_in'): \n latest = Post.query.filter_by(visible=True)\n else:\n latest = Post.query\n latest = latest.order_by(Post.id.desc()).limit(10)\n months = Post.query.get_months()\n tags = Tag.query.order_by(Tag.name).all()\n #: Needed for calculation of tag cloud\n max_count = Tag.query.get_maxcount()\n categories = sorted(Category.query.all(), key=lambda x: -x.post_count)\n uncategorized_count = Post.query.filter(Post.categories==None).count()\n return render_template('archives.html', latest=latest, tags=tags,\n categories=categories, uncategorized_count=uncategorized_count, \n months=months, max_count=max_count)",
"def generate_pages(self, writer):\r\n write = partial(writer.write_file,\r\n relative_urls=self.settings['RELATIVE_URLS'])\r\n\r\n # to minimize the number of relative path stuff modification\r\n # in writer, articles pass first\r\n self.generate_articles(write)\r\n self.generate_period_archives(write)\r\n self.generate_direct_templates(write)\r\n\r\n # and subfolders after that\r\n self.generate_tags(write)\r\n self.generate_categories(write)\r\n self.generate_authors(write)\r\n self.generate_drafts(write)",
"def write_aggregated_chapters(self, folder):\n cats = sorted([(k, len(v))\n for k, v in self.get_categories_group().items()])\n months = sorted(\n [(k, len(v)) for k, v in self.get_months_group().items()], reverse=True)\n res = [\"\", \":orphan:\", \"\", \".. _hblog-blog:\",\n \"\", \"\", \"Blog\", \"====\", \"\", \"\"]\n res.extend(\n [f\"* :ref:`{TITLES[self.Lang]['page1']} <ap-main-0>`\", \"\", \"\"])\n res.extend([TITLES[self.Lang][\"by category:\"], \"\", \"\"])\n for cat, nb in cats:\n res.append(\n \"* :ref:`{0} ({1}) <ap-cat-{0}-0>`\".format(BlogPostList.category2url(cat), nb))\n res.extend([\"\", \"\", \"\"])\n res.extend([TITLES[self.Lang][\"by month:\"], \"\", \"\"])\n res.extend([\"\", \"\", \"\"])\n for mon, nb in months:\n res.append(\"* :ref:`{0} ({1}) <ap-month-{0}-0>`\".format(mon, nb))\n\n res.extend([\"\", \"\", \"\"])\n res.extend([TITLES[self.Lang][\"by title:\"], \"\", \"\"])\n res.extend(\n [\"\", \"\", f\":ref:`{TITLES[self.Lang]['allblogs']} <l-mainblog>`\", \"\", \"\"])\n\n filename = os.path.join(folder, \"index_blog.rst\")\n with open(filename, \"w\", encoding=\"utf8\") as f:\n f.write(\"\\n\".join(res))\n return [filename]",
"def _generate_list_page(self, director, blog_posts):\n logger.info(_('Generating blog list page ...'))\n template = director.catalog.get_template(self.list_template)\n builder = ListPageBuilder(template)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.list_output)\n builder.write_to(output_file)",
"def produce_aggregated_post_page(name, lp, this, prev, next, main_page=\"Blog\",\n rst_links_up=None, rst_links_down=None,\n index_terms=None, bold_title=None, language=\"en\"):\n direction = \"|rss_image| \"\n if prev is not None:\n direction += f\":ref:`<== <{prev}>` \"\n if bold_title is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += f\"**{bold_title}**\"\n if next is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += f\":ref:`==> <{next}>`\"\n arrows = direction\n if main_page is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += f\":ref:`{main_page} <ap-main-0>`\"\n if rst_links_up is not None:\n if len(direction) > 0:\n direction += \" \"\n direction += \" \".join(rst_links_up)\n\n rows = []\n rows.append(\"\")\n rows.append(\":orphan:\")\n rows.append(\"\")\n rows.append(direction)\n rows.append(\"\")\n rows.append(\".. |rss_image| image:: feed-icon-16x16.png\")\n rows.append(\" :target: ../_downloads/rss.xml\")\n rows.append(\" :alt: RSS\")\n rows.append(\"\")\n rows.append(\"----\")\n rows.append(\"\")\n\n if index_terms is not None:\n rows.append(\"\")\n rows.append(\".. index:: \" + \",\".join(index_terms))\n rows.append(\"\")\n\n rows.append(\"\")\n rows.append(f\".. _{this}:\")\n rows.append(\"\")\n\n if bold_title is not None:\n rows.append(bold_title)\n rows.append(\"+\" * len(bold_title))\n rows.append(\"\")\n\n for post in lp:\n text = post.post_as_rst(language=language, cut=True)\n rows.append(text)\n rows.append(\"\")\n rows.append(\"\")\n\n rows.append(\"\")\n rows.append(\"----\")\n rows.append(\"\")\n if rst_links_down is not None:\n if len(arrows) > 0:\n arrows += \" \"\n arrows += \" \".join(rst_links_down)\n rows.append(arrows)\n\n return \"\\n\".join(rows)",
"def archive_get_post_list(user,max_pages=100):\n all_posts = []\n base_url = \"http://\"+user+\".tumblr.com\"\n first_archive_url = base_url+\"/archive\"\n next_page_link = first_archive_url\n # Loop over archive pages\n last_page_posts = []\n counter = 0\n while (counter <= max_pages):\n counter += 1\n logging.info(\"Scanning archive page for post ids, page # \"+repr(counter))\n logging.debug(\"next_page_link:\"+repr(next_page_link))\n page_html = get(next_page_link)\n # Check if we've reached the end; if so, stop.\n end_reached = archive_check_if_end_of_posts(page_html)\n if end_reached:\n logging.info(\"Last page of archive listing reached, stopping scan\")\n break\n this_page_posts = archive_parse_for_posts(page_html)\n logging.debug(\"this_page_posts:\"+repr(this_page_posts))\n # Stop if two pages have the same data\n if this_page_posts == last_page_posts:\n logging.info(\"Last pages data is the same as this ones!\")\n break\n all_posts += this_page_posts\n next_page_link = archive_find_next_page_url(base_url,page_html)\n last_page_posts = this_page_posts\n continue\n # Sanity check post list\n if len(all_posts) != len(set(all_posts)):\n logging.error(\"Duplicate posts in archive page listing results!\")\n assert False\n #assert False# This needs changing to find dates for each post!\n logging.debug(\"all_posts:\"+repr(all_posts))\n return all_posts",
"def render_archives():\n\n\tq = \"SELECT title, text, id, project FROM entries WHERE archived=1 ORDER BY id desc\"\n\tcur = g.db.execute(q)\n\trows = cur.fetchall()\n\tentries = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in rows]\n\n\t\"\"\" filter catagories as to not repeat \"\"\"\n\tfiltered_catagories = set([ x[3] for x in rows ])\n\n\treturn render_template('show_entries.html', \n\t\tentries=entries, \n\t\tcatagories=filtered_catagories,\n\t\tfiltered=False,\n\t\tarchived=True,\n\t\t)",
"def create_category_pages(app):\n env = app.builder.env\n # jinja2 html template\n template = CATEGORY_PAGE_TEMPLATE\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n # First write out the named page\n context[\"title\"] = category.name\n\n #get parent category\n if \"\\\\\" in category.name:\n categs = category.name.split(\"\\\\\")\n categs.pop()\n parent_category = r\"\\\\\".join(categs)\n parent_category_link = \"../\" + categs[-1] + \".html\"\n parent_category = \"<b>Category:</b> <a href='{0}'>{1}</a>\"\\\n .format(parent_category_link,parent_category)\n context[\"parentcategory\"] = parent_category\n\n # sort subcategories & pages alphabetically\n context[\"subcategories\"] = sorted(category.subcategories, key = lambda x: x.name)\n context[\"pages\"] = sorted(category.pages, key = lambda x: x.name)\n context[\"outpath\"] = category.html_path\n\n #jinja appends .html to output name\n category_html_path_noext = os.path.splitext(category.html_path)[0]\n yield (category_html_path_noext, context, template)\n\n # Now any additional index pages if required\n if category.name in INDEX_CATEGORIES:\n # index in categories directory\n category_html_dir = os.path.dirname(category.html_path)\n category_html_path_noext = category_html_dir + \"/index\"\n yield (category_html_path_noext, context, template)\n\n # index in document directory\n document_dir = os.path.dirname(category_html_dir)\n category_html_path_noext = document_dir + \"/index\"\n context[\"outpath\"] = category_html_path_noext + \".html\"\n yield (category_html_path_noext, context, template)",
"def index(request, archive=False):\n context = {'archive':archive}\n posts = Post.objects.all()\n if not archive:\n posts = posts[:10]\n context['posts'] = posts\n if request.user.is_authenticated():\n #These are the new news items the logged in user has\n context['new_posts'] = NewBlog.objects.filter(user=request.user)\n return render(request, 'blog/index.html', context)",
"def html_collect_pages(app):\n\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def html_collect_pages(app):\n if not hasattr(app.builder.env, \"categories\"):\n return # nothing to do\n\n for name, context, template in create_category_pages(app):\n yield (name, context, template)",
"def web_archive():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n return flask.render_template('archive.html', user = flask.session['user'],\n archives = db_get_archives())",
"def create_category_pages(app):\n env = app.builder.env\n\n template = \"category.html\"\n\n categories = env.categories\n for name, category in categories.iteritems():\n context = {}\n context[\"title\"] = category.name\n context[\"subcategories\"] = category.subcategories\n context[\"pages\"] = category.pages\n\n yield (name, context, template)",
"def post_archive(*args, **kwargs):\n return Post.objects.get_blog_posts().dates('publication_date', 'month')",
"def show_all_posts():\n post = Post.query.all()\n\n return render_template('all-posts.html', post=post)",
"def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')",
"def create_assets():\n assets = {}\n\n # Load all static files\n for root, dirs, files in os.walk(STATIC_DIR):\n for fname in files:\n filename = os.path.join(root, fname)\n with open(filename, \"rb\") as f:\n assets[os.path.relpath(filename, STATIC_DIR)] = f.read()\n\n # Collect pages\n pages = {}\n for fname in os.listdir(PAGES_DIR):\n if fname.lower().endswith(\".md\"):\n name = fname.split(\".\")[0].lower()\n with open(os.path.join(PAGES_DIR, fname), \"rb\") as f:\n md = f.read().decode()\n pages[name] = Page(name, md)\n\n # todo: Collect blog posts\n\n # Get template\n with open(os.path.join(THIS_DIR, \"template.html\"), \"rb\") as f:\n html_template = f.read().decode()\n\n with open(os.path.join(THIS_DIR, \"style.css\"), \"rb\") as f:\n css = f.read().decode()\n css += \"/* Pygments CSS */\\n\" + HtmlFormatter(style=\"vs\").get_style_defs(\n \".highlight\"\n )\n\n # Generate pages\n year = datetime.now().year\n for page in pages.values():\n page.prepare(pages.keys())\n title = TITLE if page.name == \"index\" else TITLE + \" - \" + page.name\n menu = create_menu(page)\n html = html_template.format(\n title=title, style=css, body=page.to_html(), menu=menu, year=year\n )\n print(\"generating\", page.name + \".html\")\n assets[page.name + \".html\"] = html.encode()\n\n # Fix backslashes on Windows\n for key in list(assets.keys()):\n if \"\\\\\" in key:\n assets[key.replace(\"\\\\\", \"/\")] = assets.pop(key)\n\n return assets",
"def index(self):\n \n return self.view.render('index.html', {\"posts\"=posts})",
"def catalog_archive(category_slug):\n category = Category.by_slug(category_slug)\n products = category.products.all()\n categories = Category.query.all()\n return render_template('frontend/home.html',\n products=products,\n categories=categories,\n selected_cat=category.name)",
"def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))",
"def master_archive(f, e):\n template = e.get_template(TEMPLATES['archive'])\n write_file(\"archives.html\", template.render(entries=f))",
"def handle(self, *args, **options):\n\n # they look strange but are what comes over from wordpress API\n # im giessing there are redirects in place to make this work\n SOURCES = {\n 'sample-page': 'aac',\n 'home-2': 'commissioning',\n 'nhs-england-and-nhs-improvement-corona-virus': 'coronavirus',\n 'greener-nhs': 'greenernhs',\n 'improvement-knowledge-hub': 'improvement-hub',\n 'tbc': 'non-executive-opportunities',\n 'nhs-rightcare': 'rightcare',\n }\n # for BasePage models\n pages = BasePage.objects.all().order_by('-depth')\n\n for page in pages:\n first_published = page.first_published_at\n last_published = page.last_published_at\n latest_revision_created = page.latest_revision_created_at\n\n if page.slug in SOURCES.keys():\n # print(SOURCES[page.wp_slug])\n sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n slug = SOURCES[page.wp_slug]\n page.slug = slug\n \"\"\"\n running save_revision() as it seems like a good idea to not break page paths\n just to be safe...\n try to keep revision dates to match whats in wordpress as our\n revisions reset that at the save()\n \"\"\"\n try:\n rev = page.save_revision()\n page.first_published_at = first_published\n page.last_published_at = last_published\n page.latest_revision_created_at = latest_revision_created\n # probably not the best way to do this but need to update the dates on the page record\n # to keep in sync with wordpress at the import stage\n # futher imports will collect new data and new dates.\n page.save()\n rev.publish()\n except ValidationError:\n print('⚠️ {} slug cannot be updated!!!'.format(page))\n time.sleep(2)\n\n # for ComponentsPage models\n # pages = ComponentsPage.objects.all().order_by('-depth')\n\n # for page in pages:\n # first_published = page.first_published_at\n # last_published = page.last_published_at\n # latest_revision_created = page.latest_revision_created_at\n\n # if page.slug in SOURCES.keys():\n # # print(SOURCES[page.wp_slug])\n # sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n # slug = SOURCES[page.wp_slug]\n # page.slug = slug\n # \"\"\"\n # running save_revision() as it seems like a good idea to not break page paths\n # just to be safe...\n # try to keep revision dates to match whats in wordpress as our\n # revisions reset that at the save()\n # \"\"\"\n # try:\n # rev = page.save_revision()\n # page.first_published_at = first_published\n # page.last_published_at = last_published\n # page.latest_revision_created_at = latest_revision_created\n # # probably not the best way to do this but need to update the dates on the page record\n # # to keep in sync with wordpress at the import stage\n # # futher imports will collect new data and new dates.\n # page.save()\n # rev.publish()\n # except ValidationError:\n # print('⚠️ {} slug cannot be updated!!!'.format(page))\n # time.sleep(2)\n\n sys.stdout.write('\\n✅ Done\\n')",
"def render_all(pages):\n for page in pages:\n render_template(page['template'], page['output'], page['values'])",
"def generate_period_archives(self, write):\r\n try:\r\n template = self.get_template('period_archives')\r\n except Exception:\r\n template = self.get_template('archives')\r\n\r\n period_save_as = {\r\n 'year': self.settings['YEAR_ARCHIVE_SAVE_AS'],\r\n 'month': self.settings['MONTH_ARCHIVE_SAVE_AS'],\r\n 'day': self.settings['DAY_ARCHIVE_SAVE_AS'],\r\n }\r\n\r\n period_date_key = {\r\n 'year': attrgetter('date.year'),\r\n 'month': attrgetter('date.year', 'date.month'),\r\n 'day': attrgetter('date.year', 'date.month', 'date.day')\r\n }\r\n\r\n def _generate_period_archives(dates, key, save_as_fmt):\r\n \"\"\"Generate period archives from `dates`, grouped by\r\n `key` and written to `save_as`.\r\n \"\"\"\r\n # `dates` is already sorted by date\r\n for _period, group in groupby(dates, key=key):\r\n archive = list(group)\r\n # arbitrarily grab the first date so that the usual\r\n # format string syntax can be used for specifying the\r\n # period archive dates\r\n date = archive[0].date\r\n # Under python 2, with non-ascii locales, u\"{:%b}\".format(date) might raise UnicodeDecodeError\r\n # because u\"{:%b}\".format(date) will call date.__format__(u\"%b\"), which will return a byte string\r\n # and not a unicode string.\r\n # eg:\r\n # locale.setlocale(locale.LC_ALL, 'ja_JP.utf8')\r\n # date.__format__(u\"%b\") == '12\\xe6\\x9c\\x88' # True\r\n try:\r\n save_as = save_as_fmt.format(date=date)\r\n except UnicodeDecodeError:\r\n # Python2 only:\r\n # Let date.__format__() work with byte strings instead of characters since it fails to work with characters\r\n bytes_save_as_fmt = save_as_fmt.encode('utf8')\r\n bytes_save_as = bytes_save_as_fmt.format(date=date)\r\n save_as = unicode(bytes_save_as,'utf8')\r\n context = self.context.copy()\r\n\r\n if key == period_date_key['year']:\r\n context[\"period\"] = (_period,)\r\n elif key == period_date_key['month']:\r\n context[\"period\"] = (_period[0],\r\n calendar.month_name[_period[1]])\r\n else:\r\n context[\"period\"] = (_period[0],\r\n calendar.month_name[_period[1]],\r\n _period[2])\r\n\r\n write(save_as, template, context,\r\n dates=archive, blog=True)\r\n\r\n for period in 'year', 'month', 'day':\r\n save_as = period_save_as[period]\r\n if save_as:\r\n key = period_date_key[period]\r\n _generate_period_archives(self.dates, key, save_as)",
"def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)",
"def generate_atom_feeds(app):\n if not ablog.builder_support(app):\n return\n blog = Blog(app)\n base_url = blog.blog_baseurl\n if not base_url:\n return\n feeds = [\n (\n blog.posts,\n blog.blog_path,\n os.path.join(app.builder.outdir, blog.blog_path, feed_root + \".xml\"),\n blog.blog_title,\n os_path_join(base_url, blog.blog_path, feed_root + \".xml\"),\n feed_templates,\n )\n for feed_root, feed_templates in blog.blog_feed_templates.items()\n ]\n if blog.blog_feed_archives:\n for header, catalog in [\n (_(\"Posts by\"), blog.author),\n (_(\"Posts from\"), blog.location),\n (_(\"Posts in\"), blog.language),\n (_(\"Posts in\"), blog.category),\n (_(\"Posted in\"), blog.archive),\n (_(\"Posts tagged\"), blog.tags),\n ]:\n for coll in catalog:\n # skip collections containing only drafts\n if not len(coll):\n continue\n folder = os.path.join(app.builder.outdir, coll.path)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n for feed_root, feed_templates in blog.blog_feed_templates.items():\n feeds.append(\n (\n coll,\n coll.path,\n os.path.join(folder, feed_root + \".xml\"),\n blog.blog_title + \" - \" + header + \" \" + str(coll),\n os_path_join(base_url, coll.path, feed_root + \".xml\"),\n feed_templates,\n )\n )\n # Config options\n feed_length = blog.blog_feed_length\n feed_fulltext = blog.blog_feed_fulltext\n for feed_posts, pagename, feed_path, feed_title, feed_url, feed_templates in feeds:\n feed = FeedGenerator()\n feed.id(blog.blog_baseurl)\n feed.title(feed_title)\n feed.link(href=base_url)\n feed.subtitle(blog.blog_feed_subtitle)\n feed.link(href=feed_url, rel=\"self\")\n feed.language(app.config.language)\n feed.generator(\"ABlog\", ablog.__version__, \"https://ablog.readthedocs.io/\")\n sorted_posts_by_date = sorted(feed_posts, key=lambda post: post.date, reverse=True)\n for i, post in enumerate(sorted_posts_by_date):\n if feed_length and i == feed_length:\n break\n post_url = os_path_join(base_url, app.builder.get_target_uri(post.docname))\n if post.section:\n post_url += \"#\" + post.section\n if blog.blog_feed_titles:\n content = None\n else:\n content = post.to_html(pagename, fulltext=feed_fulltext, img_url=True)\n feed_entry = feed.add_entry(order=\"append\")\n feed_entry.id(post_url)\n feed_entry.link(href=post_url)\n feed_entry.author({\"name\": author.name for author in post.author})\n feed_entry.pubDate(post.date.astimezone())\n feed_entry.updated(post.update.astimezone())\n for tag in sorted(post.tags):\n feed_entry.category(\n dict(\n term=tag.name.strip().replace(\" \", \"\"),\n label=tag.label,\n )\n )\n # Entry values that support templates\n title = post.title\n summary = \"\".join(paragraph.astext() for paragraph in post.excerpt)\n template_values = {}\n for element in (\"title\", \"summary\", \"content\"):\n if element in feed_templates:\n template_values[element] = jinja2.Template(feed_templates[element]).render(**locals())\n feed_entry.title(template_values.get(\"title\", title))\n summary = template_values.get(\"summary\", summary)\n if summary:\n feed_entry.summary(summary)\n content = template_values.get(\"content\", content)\n if content:\n feed_entry.content(content=content, type=\"html\")\n parent_dir = os.path.dirname(feed_path)\n if not os.path.isdir(parent_dir):\n os.makedirs(parent_dir)\n with open(feed_path, \"w\", encoding=\"utf-8\") as out:\n feed_str = feed.atom_str(pretty=True)\n out.write(feed_str.decode())\n if 0:\n # this is to make the function a generator\n # and make work for Sphinx 'html-collect-pages'\n yield",
"def archives(self, **kwargs):\n return self.session.listArchives(**kwargs)",
"def posts_index():\n posts = Post.query.all()\n return render_template('posts.html', posts=posts, post=None)",
"def generate_webpages(self):\n if self.add_to_existing:\n self.add_existing_data()\n self.make_home_pages()\n self.make_1d_histogram_pages()\n self.make_corner_pages()\n self.make_config_pages()\n if self.make_comparison:\n self.make_comparison_pages()\n if self.make_interactive:\n self.make_interactive_pages()\n if self.publication:\n self.make_publication_pages()\n if self.gwdata is not None:\n self.make_detector_pages()\n self.make_error_page()\n self.make_version_page()\n if self.notes is not None:\n self.make_notes_page()\n self.make_downloads_page()\n self.generate_specific_javascript()",
"def month_archive(request, year, month):\n articles = Article.objects.filter(pub_date__year=year, pub_date__month=month)\n context = { 'year': year, 'month': month, 'articles': articles }\n pprint_local_vars(locals())\n return render(request, 'news/month_archive.html', context)"
] |
[
"0.69427127",
"0.6276616",
"0.6273013",
"0.6104014",
"0.6066514",
"0.6062876",
"0.6052862",
"0.6033781",
"0.5921405",
"0.58953536",
"0.5889372",
"0.5798566",
"0.5767002",
"0.57562274",
"0.5737558",
"0.57229877",
"0.5699462",
"0.56583685",
"0.5576395",
"0.55608106",
"0.5549129",
"0.5535884",
"0.5514224",
"0.55089873",
"0.5501776",
"0.5499229",
"0.54934406",
"0.5493006",
"0.54715717",
"0.5465397"
] |
0.7827306
|
0
|
Register posts found in the Sphinx build environment.
|
def register_posts(app):
    blog = Blog(app)
    for docname, posts in getattr(app.env, "ablog_posts", {}).items():
        for postinfo in posts:
            blog.register(docname, postinfo)
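
A hedged sketch of one way register_posts might be invoked (the wrapper below is illustrative, not taken from the snippet; it assumes a page generator like the generate_archive_pages shown earlier in this file):

def html_collect_pages(app):
    # Populate the Blog catalogs from env.ablog_posts before any
    # archive or collection pages are generated.
    register_posts(app)
    yield from generate_archive_pages(app)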
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! 
this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA",
"def process_postlist(app, doctree, docname):\n blog = Blog(app)\n if not blog:\n register_posts(app)\n for node in doctree.findall(PostList):\n colls = []\n for cat in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n for coll in node[cat]:\n if coll in blog.catalogs[cat].collections:\n colls.append(blog.catalogs[cat].collections[coll])\n if colls:\n posts = set(blog.posts)\n for coll in colls:\n posts = posts & set(coll)\n posts = list(posts)\n posts.sort(reverse=True)\n posts = posts[: node.attributes[\"length\"]]\n else:\n posts = list(blog.recent(node.attributes[\"length\"], docname, **node.attributes))\n if node.attributes[\"sort\"]:\n posts.sort() # in reverse chronological order, so no reverse=True\n fmts = list(Formatter().parse(node.attributes[\"format\"]))\n not_in = {\"date\", \"title\", \"author\", \"location\", \"language\", \"category\", \"tags\", None}\n for text, key, __, __ in fmts:\n if key not in not_in:\n raise KeyError(f\"{key} is not recognized in postlist format\")\n excerpts = node.attributes[\"excerpts\"]\n expand = node.attributes[\"expand\"]\n date_format = node.attributes[\"date\"] or _(blog.post_date_format_short)\n bl = nodes.bullet_list()\n bl.attributes[\"classes\"].append(\"postlist-style-\" + node[\"list-style\"])\n bl.attributes[\"classes\"].append(\"postlist\")\n for post in posts:\n bli = nodes.list_item()\n bli.attributes[\"classes\"].append(\"ablog-post\")\n bl.append(bli)\n par = nodes.paragraph()\n bli.append(par)\n for text, key, __, __ in fmts:\n if text:\n par.append(nodes.Text(text))\n if key is None:\n continue\n if key == \"date\":\n par.append(nodes.Text(post.date.strftime(date_format)))\n else:\n if key == \"title\":\n items = [post]\n else:\n items = getattr(post, key)\n\n for i, item in enumerate(items, start=1):\n if key == \"title\":\n ref = nodes.reference()\n if item.options.get(\"external_link\"):\n ref[\"refuri\"] = post.options.get(\"external_link\")\n else:\n ref[\"refuri\"] = app.builder.get_relative_uri(docname, item.docname)\n ref[\"internal\"] = True\n ref[\"ids\"] = []\n ref[\"backrefs\"] = []\n ref[\"dupnames\"] = []\n ref[\"classes\"] = []\n ref[\"names\"] = []\n ref.append(nodes.Text(str(item)))\n par.attributes[\"classes\"].append(\"ablog-post-title\")\n else:\n ref = _missing_reference(app, item.xref, docname)\n par.append(ref)\n if i < len(items):\n par.append(nodes.Text(\", \"))\n if excerpts and post.excerpt:\n for enode in post.excerpt:\n enode = enode.deepcopy()\n enode.attributes[\"classes\"].append(\"ablog-post-excerpt\")\n revise_pending_xrefs(enode, docname)\n app.env.resolve_references(enode, docname, app.builder)\n enode.parent = bli.parent\n bli.append(enode)\n if expand:\n ref = app.builder.get_relative_uri(docname, post.docname)\n enode = nodes.paragraph()\n enode.attributes[\"classes\"].append(\"ablog-post-expand\")\n refnode = nodes.reference(\"\", \"\", internal=True, refuri=ref)\n innernode = nodes.emphasis(text=expand)\n refnode.append(innernode)\n enode.append(refnode)\n bli.append(enode)\n node.replace_self(bl)",
"def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)",
"def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)",
"def post_build_hook(self):",
"def setup(bot):\n bot.add_cog(ResendPost(bot, bot.post_queue))",
"def add_post_to_es(post, db_conn):\n\n from database.topic import get_topic, deliver_topic\n from database.user import get_user, deliver_user\n\n data = json_prep(deliver_post(post))\n topic = get_topic({'id': post['topic_id']}, db_conn)\n if topic:\n data['topic'] = json_prep(deliver_topic(topic))\n user = get_user({'id': post['user_id']}, db_conn)\n if user:\n data['user'] = json_prep(deliver_user(user))\n\n return es.index(\n index='entity',\n doc_type='post',\n body=data,\n id=post['id'],\n )",
"def setup(app):\n # register the two Sphinx config values used for the extension\n app.add_config_value('sp_exercise_directory', None, 'env')\n\n # register the custom docutils nodes with Sphinx\n app.add_enumerable_node(\n exercise,\n 'exercise',\n exercise_title_getter,\n html=(visit_exercise_node, depart_exercise_node),\n latex=(visit_exercise_node_, depart_exercise_node_),\n text=(visit_exercise_node_, depart_exercise_node_)\n )\n app.add_node(\n exercise_title,\n html=(visit_exercise_title_node, depart_exercise_title_node),\n latex=(visit_exercise_title_node_, depart_exercise_title_node_),\n text=(visit_exercise_title_node_, depart_exercise_title_node_)\n )\n app.add_enumerable_node(\n solution,\n 'solution',\n solution_title_getter,\n html=(visit_solution_node, depart_solution_node),\n latex=(visit_solution_node_, depart_solution_node_),\n text=(visit_solution_node_, depart_solution_node_)\n )\n app.add_node(\n solution_title,\n html=(visit_solution_title_node, depart_solution_title_node),\n latex=(visit_solution_title_node_, depart_solution_title_node_),\n text=(visit_solution_title_node_, depart_solution_title_node_)\n )\n\n # ensure the required auxiliary files are included in the Sphinx build\n app.connect('builder-inited', include_static_files)\n if not sphinx_prolog.is_css_registered(app, STATIC_FILE):\n app.add_css_file(STATIC_FILE)\n\n # register the custom directives with Sphinx\n app.add_directive('exercise', Exercise)\n app.add_directive('solution', Solution)\n\n # connect custom hooks to the Sphinx build process\n app.connect('config-inited', set_exercise_numfig_format)\n app.connect('config-inited', set_solution_numfig_format)\n app.connect('doctree-read', fix_solution_numrefs_pre)\n app.connect('doctree-resolved', fix_solution_numrefs_post)\n\n return {'version': sphinx_prolog.VERSION}",
"def push_blog():\n\n\twarn(green(\"Update blog on github pages.\"))\n\t_setup_virtualenv()\n\n\twith cd(PROJECT_PATH):\n\t\twith prefix(env.activate):\n\t\t\tlocal('python blog.py build', shell='/bin/bash')\n\n\t\tlocal('cd {}'.format(FREEZER_DESTINATION), shell='/bin/bash')\n\t\tlocal('git status')\n\t\task_msg = red(\"Force push new content to blog?\")\n\t\tif console.confirm(ask_msg, default=False) is True:\n\t\t\tlocal('git add --all')\n\t\t\tlocal('git commit -m \"new articles\"')\n\t\t\tlocal('git push --force origin master')",
"def setup(app: sphinx.application.Sphinx) -> Dict[str, Any]:\n app.require_sphinx(\"3.0\")\n\n app.add_config_value(\n \"pygments_dark_style\", default=\"native\", rebuild=\"env\", types=[str]\n )\n\n app.add_html_theme(\"furo\", str(THEME_PATH))\n\n app.add_post_transform(WrapTableAndMathInAContainerTransform)\n\n app.connect(\"html-page-context\", _html_page_context)\n app.connect(\"builder-inited\", _builder_inited)\n app.connect(\"build-finished\", _overwrite_pygments_css)\n\n return {\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n \"version\": __version__,\n }",
"def setup_docs(self):\n for arg in self.args:\n self.log.debug(\"Processing arg %s\" % arg)\n if isinstance(arg, dexy.doc.Doc) or isinstance(arg, dexy.doc.PatternDoc):\n doc = arg\n\n elif isinstance(arg, list):\n if not isinstance(arg[0], basestring):\n raise Exception(\"First arg %s should be a string\" % arg[0])\n if not isinstance(arg[1], dict):\n raise Exception(\"Second arg %s should be a dict\" % arg[1])\n\n if not \"*\" in arg[0]:\n doc = dexy.doc.Doc(arg[0], **arg[1])\n else:\n # This is a pattern doc or real doc TODO better way to verify?\n doc = dexy.doc.PatternDoc(arg[0], **arg[1])\n\n elif isinstance(arg, basestring):\n doc = dexy.doc.PatternDoc(arg)\n\n else:\n raise Exception(\"unknown arg type %s for arg %s\" % (arg.__class__.__name__, arg))\n\n doc.wrapper = self\n doc.setup()\n\n self.docs.append(doc)",
"def run_package(m):\n\n if m.args.upload:\n doc = find_fs_package_from_dir(m.args.source)\n else:\n doc = find_csv_package(m)\n\n url, user, password = get_site_config(m.args.site_name)\n wp = Client(url, user, password)\n\n post = get_or_new_post(m, wp, doc)\n\n assert post is not None\n\n if m.args.upload:\n upload_to_wordpress(wp, post, doc)\n\n content = html(doc, m.args.template)\n\n post.excerpt = doc['Root'].get_value('Root.Description') or content[:200]\n\n post_tags = list(set(\n [t.value for t in doc['Root'].find('Root.Tag')] +\n [t.value for t in doc['Root'].find('Root.Group')] +\n [doc['Root'].get_value('Root.Origin')] +\n list(split_groups_tags(m.args.group)) +\n list(split_groups_tags(m.args.tag))\n ))\n\n post.terms_names = {\n 'post_tag': post_tags,\n 'category': ['Dataset'] + list(split_groups_tags(m.args.group))\n }\n\n post.title = doc.get_value('Root.Title')\n post.slug = slugify(doc.nonver_name)\n post.content = content\n\n if m.args.publish:\n post.post_status = 'publish'\n\n try:\n if m.args.no_op:\n r = {}\n else:\n r = wp.call(EditPost(post.id, post))\n except Fault as e:\n\n if 'taxonomies' in e.faultString:\n err((\"User {} does not have permissions to add terms to taxonomies. \"\n \"Terms are: {}\").format(user, post.terms_names))\n\n raise\n\n return r",
"def _configure_sphinx(self):\n require('db_host')\n require('db_user')\n require('db_password')\n require('db_name')\n require('sphinx_counter')\n logger.info(\"Configure sphinx search daemon\")\n\n # Build /etc/sphinx.conf\n context = {\n 'database_user': env.db_user,\n 'database_password': env.db_password,\n 'database_name': env.db_name,\n 'database_host': env.db_host,\n 'counter': env.sphinx_counter,\n }\n with hide(*fab_output_hides):\n logger.info(\"Building /etc/sphinxsearch/sphinx.conf\")\n upload_template(\n 'sphinx/sphinx.conf',\n '/etc/sphinxsearch/sphinx.conf',\n context=context,\n use_jinja=True,\n template_dir=CONFIG_TPL_DIR,\n use_sudo=True,\n mode=0644,\n )\n\n script_destination = (\n '/var/lib/sphinxsearch/%s_indexer.sh' % env.db_name\n )\n with hide(*fab_output_hides):\n logger.info(\"Building %s\", script_destination)\n put(\n '../config/tpl/sphinx/policystat_indexer.sh',\n script_destination,\n mode=0755,\n use_sudo=True,\n )\n sudo('chown %s %s' % (F_CHOWN, script_destination))",
"def run(self):\n name_desc = self.__class__.name_sphinx\n settings = self.state.document.settings\n env = settings.env if hasattr(settings, \"env\") else None\n docname = None if env is None else env.docname\n tag = self.options.get('tag', '').strip()\n n = self.__class__.node_class('')\n n[\"breftag\"] = tag\n n[\"brefsort\"] = self.options.get('sort', 'title').strip()\n n[\"brefsection\"] = self.options.get(\n 'section', True) in (True, \"True\", \"true\", 1, \"1\")\n n[\"brefcontents\"] = self.options.get(\n 'contents', False) in (True, \"True\", \"true\", 1, \"1\", \"\", None, \"None\")\n n['docname'] = docname\n if env is not None:\n targetid = 'index%slist-%s' % (name_desc,\n env.new_serialno('index%slist' % name_desc))\n targetnode = nodes.target('', '', ids=[targetid])\n return [targetnode, n]\n else:\n return [n]",
"def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))",
"def store_posts( fullpath, category ):\n\n # read in the post data and store it in the posts list\n post_data = read_posts( fullpath, config )\n posts.extend( post_data )\n\n # generate an appropriate num of labels for the posts\n num_posts = len( post_data )\n labels.extend( [category] * num_posts )\n\n logging.debug(\"Read %d posts from %s.\" % (num_posts, category) )",
"def setup(app):\n if hasattr(app, \"add_mapping\"):\n app.add_mapping('blocref', blocref_node)\n app.add_mapping('blocreflist', blocreflist)\n\n app.add_config_value('blocref_include_blocrefs', True, 'html')\n app.add_config_value('blocref_link_only', False, 'html')\n\n app.add_node(blocreflist,\n html=(visit_blocreflist_node, depart_blocreflist_node),\n epub=(visit_blocreflist_node, depart_blocreflist_node),\n latex=(visit_blocreflist_node, depart_blocreflist_node),\n elatex=(visit_blocreflist_node, depart_blocreflist_node),\n text=(visit_blocreflist_node, depart_blocreflist_node),\n md=(visit_blocreflist_node, depart_blocreflist_node),\n rst=(visit_blocreflist_node, depart_blocreflist_node))\n app.add_node(blocref_node,\n html=(visit_blocref_node, depart_blocref_node),\n epub=(visit_blocref_node, depart_blocref_node),\n elatex=(visit_blocref_node, depart_blocref_node),\n latex=(visit_blocref_node, depart_blocref_node),\n text=(visit_blocref_node, depart_blocref_node),\n md=(visit_blocref_node, depart_blocref_node),\n rst=(visit_blocref_node, depart_blocref_node))\n\n app.add_directive('blocref', BlocRef)\n app.add_directive('blocreflist', BlocRefList)\n app.connect('doctree-read', process_blocrefs)\n app.connect('doctree-resolved', process_blocref_nodes)\n app.connect('env-purge-doc', purge_blocrefs)\n app.connect('env-merge-info', merge_blocref)\n return {'version': sphinx.__display_version__, 'parallel_read_safe': True}",
"def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()",
"def register_post_parser(self, fct, cfg, ctx):\n self.post_parsers.append((fct, cfg, ctx))",
"def setup(app):\n app.setup_extension('sphinx.ext.graphviz') # To set all defaults.\n app.add_node(\n dsp,\n html=(html_visit_dispatcher, None),\n latex=(latex_visit_graphviz, None),\n texinfo=(texinfo_visit_graphviz, None),\n text=(text_visit_graphviz, None),\n man=(man_visit_graphviz, None)\n )\n app.add_directive('dsp', DispatcherSphinxDirective)\n return {'version': sphinx.__display_version__, 'parallel_read_safe': True}",
"def add(self, posts):\n li_html = []\n for post in posts:\n li_html.append(\n u'<li><a href=\"{route}\">{title}</a></li>'.format(\n route=post.route, title=post.title))\n self._blog_list = u'\\n'.join(li_html)\n self._posts = posts",
"def purge_posts(app, env, docname):\n\n if hasattr(env, \"ablog_posts\"):\n env.ablog_posts.pop(docname, None)\n filename = os.path.split(docname)[1]\n env.domains[\"std\"].data[\"labels\"].pop(filename, None)\n env.domains[\"std\"].data[\"anonlabels\"].pop(filename, None)",
"def cli():\n update_all_posts()\n push_updates()",
"def setup(app: \"Sphinx\") -> dict:\n from .events import (\n InsertToctrees,\n TableofContents,\n add_changed_toctrees,\n ensure_index_file,\n parse_toc_to_env,\n )\n\n # variables\n app.add_config_value(\"external_toc_path\", \"_toc.yml\", \"env\")\n app.add_config_value(\"external_toc_exclude_missing\", False, \"env\")\n\n # Note: this needs to occur after merge_source_suffix event (priority 800)\n # this cannot be a builder-inited event, since if we change the master_doc\n # it will always mark the config as changed in the env setup and re-build everything\n app.connect(\"config-inited\", parse_toc_to_env, priority=900)\n app.connect(\"env-get-outdated\", add_changed_toctrees)\n app.add_directive(\"tableofcontents\", TableofContents)\n app.add_transform(InsertToctrees)\n app.connect(\"build-finished\", ensure_index_file)\n\n return {\"version\": __version__, \"parallel_read_safe\": True}",
"def add_post(self, post: Post) -> None:\n self.post_process.append(post)",
"def setup(app): # noqa\n # Wee want to override the directives:\n # - 'graph' from sphinx.ext.graphviz extension.\n # - 'uml' from sphinxcontrib.plantuml\n # But Sphinx warns of the override, causing failure if warnings are set\n # to fail documentation build. So, we go down and use docutils registering\n # directly instead.\n\n # app.add_directive('uml', UmlDirective)\n # app.add_directive('graph', GraphDirective)\n # app.add_directive('diagram', DiagramDirective)\n\n from docutils.parsers.rst import directives\n directives.register_directive('uml', UmlDirective)\n directives.register_directive('graph', GraphDirective)\n directives.register_directive('diagram', DiagramDirective)\n\n # Register the config value to allow to set plantweb defaults in conf.py\n app.add_config_value('plantweb_defaults', {}, 'env')\n\n # Register Plantweb defaults setter\n # Note: The str() is because:\n # - In Python 2.7, Sphinx expects a str, not unicode.\n # - In Python 3.4, Sphinx expects a str, not bytes.\n app.connect(str('builder-inited'), builder_inited_handler)",
"def write_postings(docname, postings, dbcon):\n cur = dbcon.cursor()\n for word, posting in postings.items():\n # generate text of indexes\n indexes = \"\"\n for ix in posting[\"indexes\"]:\n indexes += \"{},\".format(ix)\n indexes = indexes.rstrip(\",\")\n # insert into database; nested try is needed to handle rollback\n # and commit properly\n try:\n try:\n cur.execute(\"INSERT INTO IndexWord VALUES (?)\", (word,))\n except sqlite3.IntegrityError: # word already in index\n pass\n cur.execute(\n \"INSERT INTO Posting VALUES (?, ?, ?, ?)\",\n (word, docname, posting[\"frequency\"], indexes)\n )\n except Exception as e:\n print(e)\n dbcon.rollback()\n else:\n dbcon.commit()",
"def build():\n clean()\n jekyll('build')",
"def _builder_inited(app: sphinx.application.Sphinx) -> None:\n _write_member_documentation_pages(\n _create_documenter(env=app.env,\n documenter_cls=sphinx.ext.autodoc.ModuleDocumenter,\n name='tensorstore'))",
"def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]"
] |
[
"0.6535343",
"0.57032394",
"0.5624015",
"0.5593621",
"0.5499079",
"0.54742163",
"0.54069036",
"0.53931034",
"0.53523624",
"0.5307932",
"0.5259326",
"0.525728",
"0.5236999",
"0.5183155",
"0.5156276",
"0.51393497",
"0.5135678",
"0.51328725",
"0.5109809",
"0.5106693",
"0.50746286",
"0.50606537",
"0.504668",
"0.50336605",
"0.5028262",
"0.50270873",
"0.5012841",
"0.5007637",
"0.5006993",
"0.5002923"
] |
0.7119269
|
0
|
Use two parameters to pass into the function for robustness: tn is the triggering number (tn = -1 prints all the digits 0-9; otherwise every line is filled with the tn value), and xs is the grid size, just 10 for now.
|
def printstring(tn, xs):  # Printing function
    # tn in 0..9 fills the grid with that digit; tn == -1 prints the
    # digits 0..9 across each line; anything else is rejected.
    if -1 <= tn <= 9:
        for x in range(xs):      # Outer loop for line iteration
            print("\n")          # Blank line to meet the output requirement
            for y in range(xs):  # Inner loop for horizontal printing
                # User-defined format: a digit followed by a white space
                print(y if tn == -1 else tn, end=' ')
    else:
        print("Must be a single digit, please!\n")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def printstringtp2(xs): #Printing function\n for x in range(xs+1): #Outer loop for line iteration\n print(\"\\n\")\n for y in range(x):\n print(y,end=' ')",
"def errs_tab(n):\n return [10**(q / -10) for q in range(n + 1)]",
"def run_trials(f, n):\n\tfor value in range(2, 3):\n\t\tprint(\"{:>3}:{:>5}\".format(value, f(n, value)))",
"def pl(num_of_lines=None):\n if num_of_lines is None:\n num_of_lines = 1\n print(\"\\n\" + (\"=\")*30 + \"\\nbeginning test print...\\n\" + (\"=\")*30 + \"\\n\")\n for _ in range(0, num_of_lines):\n print(stream.readline().strip())\n print(\"\\n\" + (\"=\")*30 + \"\\nending test print\\n\"+(\"=\")*30 + \"\\n\")",
"def test_problem2():\n print('Testing problem2. The next line should be 18, 23536, 61, 5')\n print(problem2(4, 2), end=', ')\n print(problem2(105, 2), end=', ')\n print(problem2(2, 5), end=', ')\n print(problem2(2, 2))",
"def print_num(head):\n ptr = head\n digits = []\n while ptr:\n digits.append(ptr.data)\n ptr = ptr.next_node\n print(\"\".join(map(str, reversed(digits))))",
"def test_numbers(number):\n print(\"\\nRunning test_numbers with {}\".format(number))",
"def exon_line_print(temp_line, trx_exons, parent, ftype):\n for ex in trx_exons:\n temp_line[2] = ftype\n temp_line[3] = str(ex[0])\n temp_line[4] = str(ex[1])\n temp_line[8] = 'Parent='+parent\n print '\\t'.join(temp_line)",
"def test_parse_trflp(self):\r\n\r\n data = \\\r\n \"\"\"\tBin (10bp)\tBin (20bp)\tBin (30bp)\tBin (40 bp)\r\nSamp-le 1\t1000\t2000\t3000\t4000\r\nSample 2\t\t2000\t3000\t4000\r\nSample 3\t\t\t3000\t4000\r\nSample 4\t\t\t\t4000\r\nSample 5\t25\t\t\t\"\"\"\r\n samples, otus, data = parse_trflp(data.split('\\n'))\r\n\r\n samples_exp = [\r\n 'Samp.le.1',\r\n 'Sample.2',\r\n 'Sample.3',\r\n 'Sample.4',\r\n 'Sample.5']\r\n otus_exp = ['Bin__10bp_', 'Bin__20bp_', 'Bin__30bp_', 'Bin__40_bp_']\r\n data_exp = array([[1000, 0, 0, 0, 25],\r\n [2000, 2000, 0, 0, 0],\r\n [3000, 3000, 3000, 0, 0],\r\n [4000, 4000, 4000, 4000, 0]])\r\n\r\n self.assertEqual(samples, samples_exp)\r\n self.assertEqual(otus, otus_exp)\r\n assert_almost_equal(data, data_exp)",
"def print_f(msg, i, t):\n if i == t-1:\n sys.stdout.write('\\n')\n return\n t += 1\n i += 1\n msg += '\\t'\n sys.stdout.write('\\r')\n sys.stdout.write(\"%s%s%% |%s\" % (msg, int(i % t), int(i % t) * '#'))\n sys.stdout.flush()",
"def last_tidy_num():\n with open(\"B-small-attempt2.in\", \"r\") as in_file, open(\"output.txt\", \"w\") as out_file:\n next(in_file)\n case_num = 1\n for line in in_file:\n current_num = int(line.strip())\n if current_num % 10 == 0 and str(current_num).startswith('1'):\n digit_count = len([d for d in str(current_num)]) - 1\n last_tidy = ''\n for _ in range(digit_count):\n last_tidy += '9'\n to_write = 'Case #' + str(case_num) + ': ' + str(last_tidy) + '\\n'\n case_num += 1\n out_file.write(to_write)\n print(last_tidy)\n elif current_num < 10:\n to_write = 'Case #' + str(case_num) + ': ' + str(current_num) + '\\n'\n out_file.write(to_write)\n case_num += 1\n print(current_num)\n else:\n for num in range(current_num, 0, -1):\n if num == int(sort_and_digitize(num)):\n to_write = 'Case #' + str(case_num) + ': ' + str(num) + '\\n'\n out_file.write(to_write)\n case_num += 1\n print(num)\n break",
"def record_digits(n):\n return record_digits_acc(str(n), 0, [0]*10)",
"def print_lines(df):\n gen_fun = print_lines_generator(df)\n while True:\n try:\n lines = input('Do you want to see 5 lines of raw data? Please type yes or no: ').lower()\n if lines == 'yes':\n try:\n print(next(gen_fun))\n except StopIteration:\n print('You have reached the end of this file. Exiting...')\n break\n elif lines == 'no':\n break\n else:\n raise ValueError\n except ValueError:\n print('That\\'s not a valid input! Please enter yes or no.\\n')",
"def print_num(self, num):\r\n if num == \"\":\r\n print(str(self.text_lines))\r\n else:\r\n num = int(num)\r\n if num == 0:\r\n raise ValueError(\"Zero is not a valid line number\")\r\n elif num > 0:\r\n num -= 1\r\n print(self.text_lines[num])",
"def print(self,n):\r\n c = 0\r\n for i in n:\r\n for j in i:\r\n if c == 9:\r\n print()\r\n c = 0\r\n c = c+1\r\n print(j, end=\" \")",
"def return_digits(number):\n res = \"\"\n row = 0\n while row < 7:\n line = \"\"\n column = 0\n while column < len(number):\n symb = int(number[column])\n digit = Digits[symb]\n line += digit[row].replace(\"*\", str(symb))\n column += 1\n row += 1\n if row != 7:\n res += line + \"\\n\"\n else:\n res += line\n return res",
"def display(n):\n print ' -> '.join(map(str, to_list(n)))",
"def display(n):\n print ' -> '.join(map(str, to_list(n)))",
"def lessthan_5(num_list):",
"def display_data(data):\n\n index = 0\n for details in data:\n index += 1\n print(\"{5:1}{0}. {1:10} in {2:15} priority {3:>3}\".format(index, *details))",
"def print_exact_res():\n\n print(\"************************************************************\")\n print(\"EXACT RESULT FOR THE VALUES THAT MINIMIZE THE FUNCTION:\\n\")\n f_str = \"The value of x_{:d} is {:f}\"\n for i in range(1, cfg.n + 1):\n print(f_str.format(i, float(i)))\n print(\"************************************************************\\n\")",
"def print_table(n):\n \n numbers = list(range(1, n + 1))\n\n #处理第一行\n s = ''\n for i in numbers:\n s = s + '\\t' + str(i)\n print(s)\n\n for i in numbers:\n s = str(i)\n for j in numbers:\n s = s + '\\t' + str(i * j)\n print(s)",
"def print_tlines(ty,slist,scaledtime, farright):\r\n xinc = 0.005\r\n yinc = 0.002\r\n if(scaledtime != []):\r\n if max(scaledtime)/1e6 < 1.0:\r\n yearscaler = 1e3\r\n yearscalestring = \" KYR\"\r\n else:\r\n yearscaler = 1e6\r\n yearscalestring = \" MYR\"\r\n if gv[\"eventimes\"] == False:\r\n for i in range(numpops-1):\r\n if (ty[i][1] > ty[i][0]):\r\n yline(ty[i][1],farright,1,2,gv[\"graylevel\"])\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if (ty[i][2] < ty[i][0]):\r\n yline(ty[i][2],farright,1,2,gv[\"graylevel\"])\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n if (ty[i][1] > ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][1]],[xinc*(i+1),ty[i][0]],1, gv[\"black\"])\r\n if (ty[i][2] < ty[i][0]):\r\n arrow([xinc*(i+1),ty[i][2]],[xinc*(i+1),ty[i][0]],3, gv[\"black\"])\r\n else:\r\n for i in range(numpops-1):\r\n yline(ty[i][0],farright,0.5,0,0)\r\n if(scaledtime != []):\r\n scaledtime[i] /= yearscaler\r\n mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))\r\n nstr = str(mtime) + yearscalestring\r\n ## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + \" yrs\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n else:\r\n nstr = fround(slist[5][4][i][1]) + \"tu\"\r\n dotext([xinc*(i+2),ty[i][0]+yinc],nstr,0, False)\r\n return ty",
"def print_triangular_numbers(n):\r\n\r\n\tfor i in range(1, n+1):\r\n\t\tsum = int((i / 2)*(1 + i))\r\n\t\tprint(i, \"\\t\", sum)",
"def ten_by_ten():\n \n n=1\n while n<=100:\n if n%10 == 0:\n print n\n else:\n print n,\n n +=1",
"def alert(n):\n for i in range(n):\n print(''.join([start, ' ', end, '!']))",
"def cascade(n):\n print(n)\n if n >= 10:\n cascade(n//10)\n print(n)",
"def print_antex_header(antType,valid_from,valid_to,f):\n f.write(\" START OF ANTENNA\\n\")\n f.write(\"{:<20s} TYPE / SERIAL NO\\n\".format(antType))\n f.write(\"CALCULATED ANU 0 25-MAR-11 METH / BY / # / DATE\\n\")\n f.write(\" 0.5 DAZI\\n\")\n f.write(\" 0.0 90.0 0.5 ZEN1 / ZEN2 / DZEN\\n\")\n f.write(\" 2 # OF FREQUENCIES\\n\")\n\n # valid_from is a dto (datetime object\n yyyy, MM, dd, hh, mm, ss, ms = gt.dt2validFrom(valid_from)\n # force seconds to 0.00 for valid from\n f.write(\"{:>6s} {:>5s} {:>5s} {:>5s} {:>5s} 0.0000000 VALID FROM\\n\".format(yyyy,MM,dd,hh,mm))\n yyyy, MM, dd, hh, mm, ss, ms = gt.dt2validFrom(valid_to)\n hh = str(23)\n mm = str(59)\n f.write(\"{:>6s} {:>5s} {:>5s} {:>5s} {:>5s} 59.9999999 VALID UNTIL\\n\".format(yyyy,MM,dd,hh,mm))\n #\n # Change the numbers after ANU to the same code as the previous antenna \n #\n f.write(\"ANU08_1648 SINEX CODE\\n\")\n f.write(\"CALCULATED From MIT repro2 COMMENT\\n\")\n\n return 1",
"def fn(n):\n if not n: return []\n elif n < 20: return [mp[n]]\n elif n < 100: return [mp[n//10*10]] + fn(n%10)\n else: return [mp[n//100], \"Hundred\"] + fn(n%100)",
"def print_solutions(file_):\n with open(file_, 'r') as inp:\n for line in inp:\n print(line[:-5] + str(process_line(line)))"
] |
[
"0.5916246",
"0.54745644",
"0.53639555",
"0.53562635",
"0.52387565",
"0.52075",
"0.5177339",
"0.51054317",
"0.5100311",
"0.50899476",
"0.5084036",
"0.5077306",
"0.50571287",
"0.50172555",
"0.49812546",
"0.49663547",
"0.49617088",
"0.49617088",
"0.49607396",
"0.49602693",
"0.49536377",
"0.4944815",
"0.49405178",
"0.49300244",
"0.49296236",
"0.4899773",
"0.48977003",
"0.48951373",
"0.48678058",
"0.48430303"
] |
0.70049286
|
0
|
Gives the user a choice of how to write a release note.
|
def select_input(cls):
    print('Would you like to insert release notes?')
    print('0) Cancel')
    print('1) Insert directly from command line')
    print('2) Insert from a file')
    inputValue = 0
    inputNote = False
    try:
        try:
            read_input = raw_input  # Python 2
        except NameError:
            read_input = input      # Python 3 (avoids rebinding input as a local)
        inputValue = int(read_input('Select option: '))
    except (SyntaxError, ValueError):
        inputValue = 0
    if inputValue == 0:  # 'is' comparisons on ints replaced with '=='
        print('Failed to create a release note')
        return False
    elif inputValue == 1:
        inputNote = cls.cmd_note()
    elif inputValue == 2:
        inputNote = cls.select_file()
    if inputNote is not False:
        with open(Settings.releaseNotePath, 'w') as release_notes:
            release_notes.writelines(inputNote)
        print('Successfully created release note')
    return inputNote
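Note that cls.cmd_note() and cls.select_file() are referenced above but not defined in this snippet. A minimal sketch of what such helpers might look like, shown as standalone functions (on the real class they would be classmethods; the names come from the snippet, the bodies are assumptions):

import sys

def cmd_note():
    # Read a multi-line note from stdin until EOF (Ctrl-D, or Ctrl-Z on Windows).
    print('Enter/paste your release note, then send EOF on a new line:')
    note = ''.join(sys.stdin.readlines())
    return note or False

def select_file():
    # Read the note from a user-supplied file path; False signals failure.
    path = input('Path to the notes file: ')
    try:
        with open(path) as notes_file:
            return notes_file.read() or False
    except OSError:
        return False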
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def select_input(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n print('Would you like to insert release notes?')\n print('0) Cancel')\n print('1) Insert directly from command line')\n print('2) Insert from a file')\n\n cls.init()\n\n inputValue = 0\n inputNote = False\n try:\n try: input = raw_input\n except NameError: pass\n inputValue = input('Select option: ')\n inputValue = int(inputValue)\n except SyntaxError:\n inputValue = 0\n if inputValue is 0:\n cls.logger.warning('Failed to create a release note')\n return False\n elif inputValue is 1:\n inputNote = cls.cmd_note()\n elif inputValue is 2:\n inputNote = cls.select_file()\n if inputNote is not False:\n mode = 'r+' if os.path.exists(Settings.releaseNotePath) else 'w+'\n with open(Settings.releaseNotePath, mode) as release_notes:\n oldContent = release_notes.read()\n release_notes.seek(0, 0)\n release_notes.write(inputNote.rstrip('\\r\\n') + '\\n' + oldContent)\n cls.logger.info('Successfuly created release note')\n # return to the base directory\n Utility.popd()\n return inputNote",
"def cmd_note(cls): \n print(\"Enter/Paste your release note. To save use Ctrl-Z (windows) or Ctrl-D in a new line and press Enter.\")\n inputNote = sys.stdin.readlines()\n note = ''\n for line in inputNote:\n note += line\n return note",
"def cmd_note(cls): \n print(\"Enter/Paste your release note. To save use Ctrl-Z (windows) or Ctrl-D in a new line and press Enter.\")\n inputNote = sys.stdin.readlines()\n note = ''\n for line in inputNote:\n note += line\n return note",
"def note(self):\n content = sys.argv[2]\n self.record('NOTE %s' % content)\n print('Note added')",
"def select_file(cls):\n note = ''\n if module_exists('Tkinter'):\n from Tkinter import Tk\n import tkFileDialog\n root = Tk()\n root.filename = tkFileDialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if module_exists('tkinter'):\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if not root.filename:\n root.destroy()\n cancel_input = yes_no(\"Would you like to add a release note?\")\n if cancel_input is True:\n cls.select_input()\n else:\n return False\n else:\n with open(root.filename, 'r') as file_notes:\n lines = file_notes.readlines()\n for line in lines:\n note += line\n if note == '':\n note = False\n return note",
"def select_file(cls):\n cls.init()\n note = ''\n if module_exists('Tkinter'):\n from Tkinter import Tk\n import tkFileDialog\n root = Tk()\n root.filename = tkFileDialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if module_exists('tkinter'):\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if not root.filename:\n root.destroy()\n cancel_input = yes_no(\"Would you like to add a release note?\")\n if cancel_input is True:\n cls.select_input()\n else:\n return False\n else:\n with open(root.filename, 'r') as file_notes:\n lines = file_notes.readlines()\n for line in lines:\n note += line\n if note == '':\n note = False\n cls.logger.debug(\"Note selected from a file.\")\n return note",
"def note():",
"def input_description(self, bot, update):\n logger.info(\"WAITING FOR DESCRIPTION\")\n\n self.description = update.message.text\n update.message.reply_text(\"Please select a date: \",\n reply_markup=ReplyKeyboardMarkup(\n (('Now', 'Other date'),),\n one_time_keyboard=True\n ))\n return 1",
"def take_note(self, text):\r\n\r\n self.date = str(datetime.datetime.now().date()) + \"%\" + str(datetime.datetime.now().hour) + \"+\" + str(\r\n datetime.datetime.now().minute) + \"}\"\r\n self.file_name = \"notes/\" + str(self.date).replace(\":\", \"-\") + \"-note.txt\"\r\n with open(self.file_name, \"w\") as f:\r\n f.write(text)\r\n # subprocess.Popen([\"notepad.exe\", self.file_name])\r",
"def show_confirm_version(name, version, release_notes, confirm, will_push, test):\n\n print()\n print(\"Name: %s\" % name)\n print(\"Version: %s\" % version)\n print()\n\n print(\"Release Notes\")\n print(release_notes)\n\n print()\n\n if will_push:\n print(\"Saying yes will automatically push the tag to `origin`, triggering the release immediately\")\n else:\n print(\"The tag **will not** be pushed automatically, you will need to call `git push --tags` yourself\")\n\n if test:\n print()\n print(\"**This will be a dry-run that will not actually release anything permanently.**\")\n\n print()\n\n if confirm:\n val = input(\"Are you sure [y/N]? \")\n if val.lower() != 'y':\n raise GenericError(\"Cancelled by user\", 100)",
"def notesmenu():\r\n print('''\\n%s at %s acting as user %s\r\n\\nDevice Label and Notes Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get Label and Notes\r\n 2 - Change Label only\r\n 3 - Change Label and Notes\r\n 4 - Back\r\n 5 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n notesmenu()\r\n execute = {1: PACKETMASTER.device_label,\r\n 2: PACKETMASTER.set_name_guided,\r\n 3: PACKETMASTER.set_label_guided,\r\n 4: hardwareconfig,\r\n 5: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n notesmenu()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n notesmenu()",
"def add_note():\n pass",
"def enter_notes():\n notes = input(\"Notes (Optional): \")\n clean_scr()\n return notes",
"def get_notes():\n clear()\n notes = input(\"Notes (Optional, leave blank if none): \")\n return notes",
"def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes",
"def display_note(self, note):\n\t\tself.canvas.itemconfig(self.note, text = note)",
"def about(display=True):\n\n ABOUT_TEXT = \"\"\"\nPre-release version %s (%s) of Topographica; an updated\nversion may be available from topographica.org.\n\nThis program is free, open-source software available under the BSD\nlicense (http://www.opensource.org/licenses/bsd-license.php).\n\"\"\"%(release,version)\n if display:\n print ABOUT_TEXT\n else:\n return ABOUT_TEXT",
"def cmd_note(self, msg, args):\n\n if len(args)<2:\n return \"Missing arguments\"\n\n cmd = args[0]\n dat = args[1]\n\n\n if \".\" in dat:\n return \"dots are not allowed in notename\"\n\n if cmd == \"set\":\n if len(args)<3:\n return \"set command needs more arguments\"\n txt = \" \".join(args[2:])\n try:\n with file(os.path.join(self.path, \"%s.txt\" % (dat,)), 'w') as f:\n f.write(txt)\n return \"note set!\"\n except BaseException as e:\n return \"unable to set the note: %s\" % (e,)\n elif cmd == \"get\":\n try:\n with file(os.path.join(self.path, \"%s.txt\" % (dat,)), 'r') as f:\n return \"\\n\".join(f.readlines())\n except BaseException as e:\n return \"No note with this name found: %s\" % (e,)\n elif cmd == \"del\":\n try:\n os.remove(os.path.join(self.path, \"%s.txt\" % (dat,)))\n return \"note removed!\"\n except BaseException as e:\n return \"unable to remove note\" % (e,)\n elif cmd == \"add\":\n if len(args)<3:\n return \"add command needs more arguments\"\n txt = \" \".join(args[2:])\n try:\n f = file(os.path.join(self.path, \"%s.txt\" % (dat,)), 'r')\n l = f.readlines()\n f.close()\n l.append(txt)\n f = file(os.path.join(self.path, \"%s.txt\" % (dat,)), 'w')\n f.write(\"\\n\".join(l))\n f.close()\n return \"note set!\"\n except BaseException as e:\n return \"unable to add text in note: %s\" % (e,)\n else:\n return \"Invalid command\"",
"def PromptQuestion(requestedProduct, downloadLocation):\n question = \"\\n {0} selected!!: Are you sure you wish to download the latest \" \\\n \"version of '{0}' to {1} ?: \".format(requestedProduct.upper(), downloadLocation)\n return question",
"async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)",
"def addNote(self, note):\n logger.debug(\"Func: addNote\")\n\n if not self._currentBaseSceneName:\n logger.warning(\"No Base Scene file selected\")\n return\n if self._currentVersionIndex == -1:\n logger.warning(\"No Version selected\")\n return\n now = datetime.datetime.now().strftime(\"%d/%m/%Y-%H:%M\")\n self._currentNotes = \"%s\\n[%s] on %s\\n%s\\n\" % (self._currentNotes, self.currentUser, now, note)\n self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Note\"] = self._currentNotes\n self._dumpJson(self._currentSceneInfo, self._baseScenesInCategory[self._currentBaseSceneName])",
"def main_menu():\n\tprint(\n\"\"\"\nUsage :-\n$ ./todo add \"todo item\" # Add a new todo\n$ ./todo ls # Show remaining todos\n$ ./todo del NUMBER # Delete a todo\n$ ./todo done NUMBER # Complete a todo\n$ ./todo help # Show usage\n$ ./todo report # Statistics\"\"\")",
"def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()",
"def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"",
"def edit_notes(entry):\n entry.notes = get_notes()\n entry.save()\n input(\"Edit successful. \")\n return entry",
"def subChoose():\n\n print \"Please Choose Subject:\\n\\n1, Carms\\n2, Runes\\n3, Defence Against Dark Arts\\n4, Astronomy\\n5, Bonus\"",
"def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note",
"def test(self):\n self.note(\"Test Note\", \"\"\" This is a note.\nsecond line\"\"\", \"date\")",
"def main_menu_selection():\n action = input('''\n Pleaes select one:\n\n a - Send a thank you\n b - Create a report\n c - Quit\n >''')\n\n return action.strip()",
"async def test_release_notes(doof, repo_info, event_loop, mocker):\n old_version = \"0.1.2\"\n update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version)\n notes = \"some notes\"\n create_release_notes_mock = mocker.patch('bot.create_release_notes', autospec=True, return_value=notes)\n\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['release', 'notes'],\n loop=event_loop,\n )\n\n update_version_mock.assert_called_once_with(\"9.9.9\")\n create_release_notes_mock.assert_called_once_with(old_version, with_checkboxes=False)\n\n assert doof.said(\"Release notes since {}\".format(old_version))\n assert doof.said(notes)"
] |
[
"0.74510175",
"0.6731308",
"0.6731308",
"0.6720483",
"0.6429505",
"0.6398019",
"0.63253635",
"0.62014854",
"0.6157431",
"0.6125236",
"0.6102147",
"0.6084187",
"0.59821856",
"0.5821075",
"0.5782732",
"0.5755479",
"0.5750794",
"0.5741082",
"0.5711053",
"0.5698787",
"0.56906605",
"0.567605",
"0.56656677",
"0.5622369",
"0.5596838",
"0.55769795",
"0.5561224",
"0.55535793",
"0.5523927",
"0.5504628"
] |
0.74380577
|
1
|
Return a sequence of oslo.messaging.Target. This sequence defines the exchange and topics to be connected for this plugin.
|
def get_targets(conf):
return [oslo.messaging.Target(topic=topic,
exchange=conf.glance_control_exchange)
for topic in conf.notification_topics]
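The comprehension builds one Target per configured topic, all bound to the same control exchange. A small usage sketch, assuming oslo.messaging is importable and using a stand-in for the real oslo.config object (the attribute names match what get_targets reads; the values are illustrative):

from types import SimpleNamespace

conf = SimpleNamespace(
    notification_topics=['notifications', 'notifications_extra'],
    glance_control_exchange='glance',
)
for target in get_targets(conf):
    print(target.topic, target.exchange)
# -> notifications glance
# -> notifications_extra glance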
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets",
"def get_next_target_addresses(self) -> List[str]:\n targets = []\n for edge in self._get_out_edges(self.active_pod):\n targets.append(self._get_target_pod(edge.pod).full_address)\n return targets",
"def Targets(self):\n return self._targets",
"def get_target_ports(self):\n return self.targets",
"def ProduceTargets(self):\n\n if self.completion_wanted:\n return self._FindTarget()\n else:\n return []",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContactTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def get_next_targets(self) -> List['RoutingTable']:\n targets = []\n for edge in self._get_out_edges(self.active_pod):\n new_graph = RoutingTable(self, copy=True)\n new_graph.active_pod = edge.pod\n targets.append((new_graph, edge.send_as_bind))\n return targets",
"def GetTargets(self):\n return []",
"def targets(self):\n\n return [get_target_by_id(i) for i in self._target_ids]",
"def target(self) -> list[str]:\n if self._target is None:\n print(self.__class__.target.__doc__)\n raise SilSubProblemError(\n \"The *target* property has not been set (see above).\"\n )\n return self._target",
"def get_targets(self):\n\t\treturn self.prDoc['inputs']['data'][0]['targets']",
"def get_targets(self, sample, net_output):\n return sample[\"target\"]",
"def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PlanTargetsArgs']]]]:\n return pulumi.get(self, \"targets\")",
"def get_hosts(self, target, listener_type):",
"def targets(self) -> List[Point2]:\n return self._targets",
"def target_ids(self):\n\n return self._target_ids",
"def conn_target(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tconn_module = None\n\t\tfor mod in self.conn_modules:\n\t\t\tif mod.module_id == self.build['conn_module']:\n\t\t\t\tconn_module = mod\n\t\t\t\tbreak\n\t\tif conn_module is None:\n\t\t\tself.fail('Couldn\\'t find conn_module ' + self.build['conn_module']) # pragma: no cover\n\n\t\t# Set up the target in pexpect.\n\t\tconn_module.get_config(self)\n\t\tconn_module.build(self)",
"def get_reply_target(self, agent, collective):",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors",
"def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors",
"def targets(self):\n ret = {}\n for device in self.devices:\n ret[device.name] = device.name\n return ret",
"def get_targets():\n # Use a list comp because querying MODM with Guid.find(Q('referent', 'eq', None))\n # only catches the first case.\n return [each for each in Guid.find() if each.referent is None]",
"def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes",
"def targets(self):\n\n\t\tstatus, targets = self.execute(self.mission, 'target_list', self.kingdom)\n\n\t\t# Nothing specified : default is everyone but me.\n\t\tif targets == self:\n\t\t\ttargets = Kingdom.objects.exclude(id=self.kingdom_id)\n\t\t\n\t\t# Pre-fetch user, for direct access to kingdom name.\n\t\tif isinstance(targets, QuerySet):\n\t\t\ttargets = targets.select_related('user')\n\n\t\treturn targets",
"def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e",
"def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e",
"def getTarget(self):\n return self.Target",
"def fetch_target(self, name):\n target_name = []\n \n for x in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n target_name.append( joint(joint_name = x, \n joint_target = rospy.get_param('/targets/'+name+'/'+x)) )\n return target_name"
] |
[
"0.6079792",
"0.5927731",
"0.58305705",
"0.5789932",
"0.57026273",
"0.56879294",
"0.5579699",
"0.5555303",
"0.55435085",
"0.5541425",
"0.5445215",
"0.54273444",
"0.53700495",
"0.5360802",
"0.53459704",
"0.5276226",
"0.5225042",
"0.5207724",
"0.52066725",
"0.52066725",
"0.52066725",
"0.52066725",
"0.5180766",
"0.51607865",
"0.5157088",
"0.5154193",
"0.51512814",
"0.51512814",
"0.5150737",
"0.51461434"
] |
0.72574925
|
0
|
Get or set whether the learning task is a time series.
|
def time_series(self) -> bool:
return self._time_series
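The description says "get or set", but only the getter appears here. Assuming the getter is decorated with @property inside its class, the matching setter would follow the usual property pattern (a sketch, not shown in the original):

@time_series.setter
def time_series(self, value: bool) -> None:
    # Record whether this learning task should be treated as a time series.
    self._time_series = bool(value)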
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def has_t(self):\n return any(map(lambda s: s.is_temporal, self))",
"def is_timeseries(filepath):\n\n if os.path.isdir(os.path.dirname(filepath)):\n\n if len(os.listdir(os.path.dirname(filepath))) > 1:\n ts = True\n else:\n ts = False\n else:\n ts = None\n\n return ts",
"def is_temporal(self, e):\n if e in self.temporal:\n return True",
"def is_dataset(self):\n return self._dataset is not None",
"def is_temporal(axis):\n return (axis.lower() in temporal_axes)",
"def load_timeseries(timeseries_file, ts=\"roi\"):\n if (ts == \"roi\") or (ts == \"voxel\"):\n timeseries = np.load(timeseries_file)[\"roi\"]\n return timeseries\n else:\n print(\n \"You have not selected a valid timeseries type.\"\n + \"options are ts='roi' or ts='voxel'.\"\n )\n pass",
"def isdt(self):\n return self.Units.isreftime and self._subarray.dtype == _dtype_object",
"def is_task_stagnant(task):",
"def is_task(self):\n from .tasks import Task\n return isinstance(self, Task)",
"def is_tentative(self):\n return self.state == TrackState.Tentative",
"def is_tentative(self):\n return self.state == TrackState.Tentative",
"def task_type(self):\n pass",
"def _get_timeseries_class():\n global _timeseries_class\n if not _timeseries_class:\n from energyquantified.data import Timeseries\n _timeseries_class = Timeseries\n return _timeseries_class",
"def task(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"task\")",
"def is_tvar(x):\n return type(x) is T.TensorVariable",
"def discrete_time(self):\n return bool(self._ll_tree_sequence.get_discrete_time())",
"def is_training(self):\n return self.mode == \"train\"",
"def is_training(self):\n return self.mode == \"train\"",
"def time_to_target_training(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target_training\"))\r\n return self._training_modes[0]",
"def graph_has_temporal(g):\n return any(any(edge.get(p) == 'time' for p in {'argmax', 'argmin', 'type'}) or 'num' in edge for edge in g.get('edgeSet', []))",
"def _IsTimeReplot( self ):\n return True",
"def to_timeseries(self, dataset_name, light=False):\n timeseries = tokio.timeseries.TimeSeries()\n timeseries.dataset_name = dataset_name\n\n try:\n dataset = self[dataset_name]\n except KeyError:\n # can't attach because dataset doesn't exist; pass this back to caller so it can init\n return None\n\n timeseries.dataset = dataset if light else dataset[:, :]\n\n # load and decode version of dataset and file schema\n timeseries.global_version = self['/'].attrs.get('version')\n timeseries.version = self.get_version(dataset_name)\n if isinstance(timeseries.version, bytes):\n timeseries.version = timeseries.version.decode()\n\n # copy columns into memory\n columns = self.get_columns(dataset_name)\n timeseries.set_columns(columns)\n\n # copy metadata into memory\n for key, value in dataset.attrs.items():\n if isinstance(value, bytes):\n timeseries.dataset_metadata[key] = value.decode()\n else:\n timeseries.dataset_metadata[key] = value\n for key, value in dataset.parent.attrs.items():\n if isinstance(value, bytes):\n timeseries.group_metadata[key] = value.decode()\n else:\n timeseries.group_metadata[key] = value\n\n timeseries.timestamp_key = get_timestamps_key(self, dataset_name)\n timeseries.timestamps = self[timeseries.timestamp_key]\n timeseries.timestamps = timeseries.timestamps if light else timeseries.timestamps[:]\n\n timeseries.timestep = timeseries.timestamps[1] - timeseries.timestamps[0]\n return timeseries",
"def has_TaskSet(self, desired_metadata):\n return bool(self._resolve_TaskSet(desired_metadata)) or self.fallback.has_TaskSet(desired_metadata)",
"def isSetTimeUnits(self):\n return _libsbml.Model_isSetTimeUnits(self)",
"def _get_ml_task(self):\n self._validate_ml_task()\n if self.ml_task == \"auto\":\n classes_number = self.n_classes\n if classes_number == 2:\n self._estimator_type = \"classifier\" # for sk-learn api\n return BINARY_CLASSIFICATION\n elif classes_number <= 20:\n self._estimator_type = \"classifier\" # for sk-learn api\n return MULTICLASS_CLASSIFICATION\n else:\n self._estimator_type = \"regressor\" # for sk-learn api\n return REGRESSION\n else:\n return deepcopy(self.ml_task)",
"def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')",
"def is_already_tuned(task, log_filename):\n if not os.path.exists(log_filename):\n return False\n\n dispatch_context = tvm.autotvm.task.ApplyHistoryBest(log_filename)\n return dispatch_context._query_inside(task.target, task.workload)",
"def is_training(self):\n return self._labels_one_hot is not None",
"def model():\n return TimeSeriesMultiReg()",
"def detect_task_type(path):\n # distinguishing \"delay-response\" task or \"multi-target-licking\" task\n mat = spio.loadmat(path.as_posix(), squeeze_me=True, struct_as_record=False)\n GUI_fields = set(mat['SessionData'].SettingsFile.GUI._fieldnames)\n\n if ({'X_center', 'Y_center', 'Z_center'}.issubset(GUI_fields)\n and not {'SamplePeriod', 'DelayPeriod'}.issubset(GUI_fields)):\n task_type = 'multi-target-licking'\n else:\n task_type = 'delay-response'\n\n return task_type"
] |
[
"0.5922883",
"0.58697695",
"0.55525404",
"0.55141145",
"0.54814786",
"0.5361244",
"0.53277946",
"0.517447",
"0.51625425",
"0.51449305",
"0.51449305",
"0.5139298",
"0.51210666",
"0.51178837",
"0.5052959",
"0.49301153",
"0.49274877",
"0.49274877",
"0.49248353",
"0.49169722",
"0.4916285",
"0.48796412",
"0.48649183",
"0.4863648",
"0.48593372",
"0.4836133",
"0.48101115",
"0.4806306",
"0.48037887",
"0.47778064"
] |
0.667246
|
0
|
Get suitable hyperparameter ranges for a given task. The returned dictionary may contain nested dictionaries as argument values; this indicates an optuna.trial suggestion method. The class _Objective will handle transforming this dictionary into a callable.
|
def _get_params_ranges(task: str) -> Dict[str, Any]:
params_file = os.path.join(
os.path.dirname(__file__), "params", "xgboost.yml"
)
params = utils.read_yaml(params_file)
if "regression" in task.lower():
params.update({"objective": "reg:squarederror"})
return params
if "binary" in task.lower():
params.update({"objective": "binary:logistic"})
return params
raise ValueError(f"{task} is not a supported task.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def objective(trial, \n bounds: Optional[Iterable]=None, \n func: Optional[Callable]=None, \n param_names: Optional[List[str]]=None):\n if param_names is None:\n param_names = PARAM_NAMES\n if (bounds is None):\n bounds = ((-10, 10) for _ in param_names)\n if not isinstance(bounds, dict):\n bounds = dict((p, (min(b), max(b))) \n for p, b in zip(param_names, bounds))\n if func is None:\n func = DEFAULT_METRIC_FUNC\n\n params = dict(\n (p, trial.suggest_float(p, bounds.get(p)[0], bounds.get(p)[1])) \n for p in param_names \n )\n # x = trial.suggest_float('x', -10, 10)\n return func((params[p] for p in param_names))",
"def hyperopt_func(model_dict, model_param_names, training_param_names, param_space, datasets, max_evals=30):\n tester = fitness(model_dict, model_param_names, training_param_names, datasets)\n trials = Trials()\n \n timer_start = timer()\n best = fmin(fn=tester.objective, \n space=param_space, \n algo=tpe.suggest, \n max_evals=max_evals, \n trials=trials, \n rstate=np.random.RandomState(50))\n timer_end = timer()\n print('Total training time (min):',(timer_end-timer_start)/60)\n results = sorted(trials.results, key = lambda x: x['loss'])\n return results",
"def get_hyperparams(self):",
"def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params",
"def evaluate_mapped_inputs(self,**kwargs):\n result = {}\n for v,t,o,p,n in zip(self.values,self.thresholds,self.operations,self.proportions,self.output_names):\n value = kwargs.get(v)\n if isinstance(t,basestring):\n threshold = kwargs.get(t)\n else:\n threshold = t\n if o == \"lt\":\n result[n] = (value < threshold * p)\n elif o == \"gt\":\n result[n] = (value > threshold * p)\n elif o == \"lte\":\n result[n] = (value <= threshold * p)\n elif o == \"gte\":\n result[n] = (value >= threshold * p)\n return result",
"def get_kwargs(d):\n return {\"range\": d.get(\"range\", None)}",
"def kwargs (self):\n return dict (bins=self.bins, range=self.range)",
"def kwargs (self):\n return dict (bins=self.bins, range=self.range)",
"def _eval_params(trial, params: Dict[str, Any]) -> Dict[str, Any]:\n prepared = dict()\n for arg, value in params.items():\n if isinstance(value, dict):\n # Extract method.\n name = list(value.keys())[0]\n # Add prefix.\n method = \"suggest_\" + name\n # Get method kwargs.\n kwargs = value[name]\n # Add name arg.\n kwargs.update({\"name\": arg})\n # Evaluate method.\n value = getattr(trial, method)(**kwargs)\n prepared.update({arg: value})\n return prepared",
"def make_base_dict(problem_tups, method_tups, **kwargs):\n nfunc_max_dict = kwargs.pop('nfunc_max_dict',\n {'gg': 5, 'ta': 5, 'nn': 10, 'ad': 5})\n y_sigma_2d = kwargs.pop('y_sigma_2d', 0.2)\n y_sigma_1d = kwargs.pop('y_sigma_1d', 0.07)\n x_sigma_1d = kwargs.pop('x_sigma_1d', 0.07)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n output = {}\n for fit_func, data_func, data_type in problem_tups:\n problem_key = get_problem_key(fit_func, data_func, data_type)\n output[problem_key] = {}\n # Get data\n if data_func.__name__[-2:] == '1d':\n y_error_sigma = y_sigma_1d\n x_error_sigma = x_sigma_1d\n else:\n y_error_sigma = y_sigma_2d\n x_error_sigma = None\n data = bsr.data.generate_data(\n data_func, data_type, y_error_sigma,\n x_error_sigma=x_error_sigma)\n for method_key in method_tups:\n output[problem_key][method_key] = {}\n adaptive = method_key[0]\n nfunc_list = get_nfunc_list(\n fit_func, adaptive, data, nfunc_max_dict)\n likelihood_list = []\n prior_list = []\n if fit_func.__name__ == 'adfam_gg_ta_1d' and not adaptive:\n for nfunc in nfunc_list:\n likelihood_list.append(bsr.likelihoods.FittingLikelihood(\n data, bf.gg_1d, nfunc, adaptive=adaptive))\n prior_list.append(bsr.priors.get_default_prior(\n bf.gg_1d, nfunc, adaptive=adaptive))\n for nfunc in nfunc_list:\n likelihood_list.append(bsr.likelihoods.FittingLikelihood(\n data, bf.ta_1d, nfunc, adaptive=adaptive))\n prior_list.append(bsr.priors.get_default_prior(\n bf.ta_1d, nfunc, adaptive=adaptive))\n elif fit_func.__name__ == 'nn_adl' and not adaptive:\n for nfunc in nfunc_list:\n likelihood_list.append(bsr.likelihoods.FittingLikelihood(\n data, nn.nn_1l, [nfunc[0], nfunc[-1]],\n adaptive=adaptive))\n prior_list.append(bsr.priors.get_default_prior(\n nn.nn_1l, [nfunc[0], nfunc[-1]], adaptive=adaptive))\n for nfunc in nfunc_list:\n likelihood_list.append(bsr.likelihoods.FittingLikelihood(\n data, nn.nn_2l, nfunc, adaptive=adaptive))\n prior_list.append(bsr.priors.get_default_prior(\n nn.nn_2l, nfunc, adaptive=adaptive))\n else:\n for nfunc in nfunc_list:\n # Make likelihood, prior and run func\n likelihood_list.append(bsr.likelihoods.FittingLikelihood(\n data, fit_func, nfunc, adaptive=adaptive))\n prior_list.append(bsr.priors.get_default_prior(\n fit_func, nfunc, adaptive=adaptive))\n output[problem_key][method_key]['nfunc_list'] = nfunc_list\n output[problem_key][method_key]['likelihood_list'] = \\\n likelihood_list\n output[problem_key][method_key]['prior_list'] = prior_list\n return output",
"def get_map_task_params(self):\n return {}",
"def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}",
"def hyperparameters(self) -> dict[str, Any]:\n return self.config[\"hyperparameters\"]",
"def __call__(self, hyperparameters: dict) -> dict:\n result = self.perturb(hyperparameters)\n\n if random.random() < self.resample_probability:\n result = self.resample(result)\n\n return result",
"def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res",
"def bs_parameters(T_min, T_max, num):\n T_array = np.linspace(T_min, T_max, num)\n t_array = np.sqrt(T_array)\n rf = np.vectorize(lambda t: sqrt(1 - pow(t, 2)))\n r_array = rf(t_array)\n return t_array, r_array",
"def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]",
"def run_hyperopt(self, max_eval, space):\n # Reset run parameters\n self._max_eval = max_eval\n self._results = {}\n self._eval_idx = 0\n\n # Hyperopt is picky about the function handle\n def model_handle(params):\n return self.model(params)\n\n # Run the hyperparameter optimization\n _ = fmin(fn=model_handle, space=space, algo=tpe.suggest, max_evals=max_eval)\n return self._results",
"def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config",
"def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. O_O');",
"def compute_optimalReward(task):\n\n\tT = 15.0\n\tweights = 0\n\tif task == TABLE_TASK or task == COFFEE_TASK:\n\t\tweights = 1\n\telif task == LAPTOP_TASK:\n\t\tweights = 10\n\n\t# initialize start/goal based on task \n\tif task == COFFEE_TASK or task == HUMAN_TASK:\n\t\tpick = pick_shelf\n\telse:\n\t\tpick = pick_basic\n\n\tif task == LAPTOP_TASK:\n\t\tplace = place_higher\n\telse:\n\t\tplace = place_lower\n\t\t\n\tstartRad = np.array(pick)*(math.pi/180.0)\n\tgoalRad = np.array(place)*(math.pi/180.0)\n\tstart = startRad\n\tgoal = goalRad\n\n\tplan = Planner(task)\t\n\tfilename = None\n\tif task == 1:\n\t\tfilename = \"task1.csv\"\n\telif task == 2:\n\t\tfilename = \"task2.csv\"\n\telif task == 3:\n\t\tfilename = \"task3.csv\"\n\t\t\n\t# get optimal waypts from file\n\twaypts = get_opt_waypts(filename)\n\tr = plan.featurize(waypts)\n\tRvel = r[0]\n\tRfeat = np.sum(r[1])\n\n\tplan.kill_planner()\n\treturn (Rvel, Rfeat)",
"def init_population(self, task):\n if task.max_iters != np.inf:\n total_candidates = task.max_iters\n elif task.max_evals != np.inf:\n total_candidates = task.max_evals\n else:\n total_candidates = 0\n self.candidates = []\n x = None\n for i in range(total_candidates):\n while True:\n x = task.lower + task.range * self.random(task.dimension)\n if not np.any([np.all(a == x) for a in self.candidates]):\n self.candidates.append(x)\n break\n\n x_fit = task.eval(self.candidates[0])\n return x, x_fit, {}",
"def __call__(self, hyperparameters: dict) -> dict:\n result = hyperparameters.copy()\n\n for key, value in self.mutations.items():\n result[key] = value() if callable(value) else random.choice(value)\n\n return result",
"def hyperparameter_tunning(\n model,\n train_features: pd.DataFrame,\n train_target: pd.Series,\n validation_features: pd.DataFrame,\n validation_target: pd.Series,\n hyperparameter_grid: dict\n) -> dict:\n\n best_estimator = None\n best_hyperparams = {}\n best_metric = 0.0\n\n hp_grid = [this_hp for this_hp in hyperparameter_grid.values()]\n all_combinations_list = list(itertools.product(*hp_grid))\n\n all_combinations_dic = []\n\n for this_combination in all_combinations_list:\n\n this_hp_set = {}\n\n for i, key in enumerate(hyperparameter_grid.keys()):\n\n this_hp_set[key] = this_combination[i]\n\n all_combinations_dic.append(this_hp_set)\n\n for this_hp_set in all_combinations_dic:\n\n this_estimator = model(**this_hp_set)\n\n this_estimator.fit(train_features, train_target)\n\n predictions = this_estimator.predict(validation_features)\n\n evaluation_metric = f1_score(validation_target, predictions)\n\n if evaluation_metric > best_metric:\n\n best_metric = evaluation_metric\n\n best_estimator = this_estimator\n\n best_hyperparams = this_hp_set\n\n return {'best_hyperparameters': best_hyperparams, 'best_model': best_estimator, 'best_metric': best_metric}",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required",
"def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range",
"def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task', None, 'Underlying task')\n p.Define('logdir', None, 'Log directory')\n p.Define('num_splits_per_client', None, '')\n p.Define('steps_per_loop', None, 'Number of steps to run.')\n p.Define('dataset_name', None,\n 'Dataset the program is operating on, eg: \"Test\"')\n p.Define('name', 'base_program', 'Program name.')\n p.Define('task_name', None,\n 'If multi-task, what the high-level task name is')\n p.Define('num_threads', 1, 'Number of threads in multiprocessing pool.')\n p.Define('spmd', False, 'Whether program is running under SPMD mode.')\n p.Define('write_train_input_stats', False,\n 'Whether to write input data stats during training.')\n p.Define('max_metrics', 256, 'Overrides TpuEvalMetrics.max_metrics')\n p.Define('ml_perf', None, 'MLPerf config')\n return p",
"def range_parameter_to_dict(parameter: RangeParameter) -> Dict[str, Any]:\n return {\n \"__type\": parameter.__class__.__name__,\n \"name\": parameter.name,\n \"parameter_type\": parameter.parameter_type,\n \"lower\": parameter.lower,\n \"upper\": parameter.upper,\n \"log_scale\": parameter.log_scale,\n \"logit_scale\": parameter.logit_scale,\n \"digits\": parameter.digits,\n \"is_fidelity\": parameter.is_fidelity,\n \"target_value\": parameter.target_value,\n }",
"def evaluate(self, task, **kwargs):\n self.solver.clear()\n\n func_name = task.replace(\" \", \"_\")\n if not hasattr(self, func_name):\n raise ValueError(\"Unknown task `%s`\" % task)\n logger.info(\"evaluate on %s\" % task)\n result = getattr(self, func_name)(**kwargs)\n for metric, value in sorted(result.items()):\n logger.warning(\"%s: %g\" % (metric, value))",
"def get_new_suggestions(self, study, trials=[], number=1):\n # Construct search space, example: {\"x\": hyperopt.hp.uniform('x', -10, 10), \"x2\": hyperopt.hp.uniform('x2', -10, 10)}\n hyperopt_search_space = {}\n\n # study = Study.objects.get(name=study_name)\n study_configuration_json = json.loads(study.study_configuration)\n params = study_configuration_json[\"params\"]\n\n for param in params:\n param_name = param[\"parameterName\"]\n\n if param[\"type\"] == \"INTEGER\":\n # TODO: Support int type of search space)\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n hyperopt_search_space[param_name] = hyperopt.hp.uniform(\n param_name, param[\"minValue\"], param[\"maxValue\"]\n )\n\n elif param[\"type\"] == \"DISCRETE\" or param[\"type\"] == \"CATEGORICAL\":\n feasible_point_list = [\n value.strip() for value in param[\"feasiblePoints\"].split(\",\")\n ]\n hyperopt_search_space[param_name] = hyperopt.hp.choice(\n param_name, feasible_point_list\n )\n\n # New hyperopt variables\n hyperopt_rstate = np.random.RandomState(random.randint(1, 2 ** 31 - 1))\n hyperopt_domain = hyperopt.Domain(\n None, hyperopt_search_space, pass_expr_memo_ctrl=None\n )\n\n hyperopt_trial_specs = []\n hyperopt_trial_results = []\n # Example: # Example: [{'tid': 0, 'idxs': {'l1_normalization': [0], 'learning_rate': [0], 'hidden2': [0], 'optimizer': [0]}, 'cmd': ('domain_attachment', 'FMinIter_Domain'), 'vals': {'l1_normalization': [0.1], 'learning_rate': [0.1], 'hidden2': [1], 'optimizer': [1]}, 'workdir': None}]\n hyperopt_trial_miscs = []\n hyperopt_trial_new_ids = []\n\n # Update hyperopt for trained trials with completed advisor trials\n completed_hyperopt_trials = hyperopt.Trials()\n\n # completed_advisor_trials = Trial.objects.filter(\n # study_name=study_name, status=\"Completed\")\n completed_advisor_trials = [i for i in trials if i.status == \"Completed\"]\n\n for index, advisor_trial in enumerate(completed_advisor_trials):\n # Example: {\"learning_rate\": 0.01, \"optimizer\": \"ftrl\"}\n parameter_values_json = json.loads(advisor_trial.parameter_values)\n\n # Example: {'l1_normalization': [0], 'learning_rate': [0], 'hidden2': [0], 'optimizer': [0]}\n hyperopt_trial_miscs_idxs = {}\n # Example: {'l1_normalization': [0.1], 'learning_rate': [0.1], 'hidden2': [1], 'optimizer': [1]}\n hyperopt_trial_miscs_vals = {}\n new_id = index\n hyperopt_trial_new_ids.append(new_id)\n hyperopt_trial_misc = dict(\n tid=new_id, cmd=hyperopt_domain.cmd, workdir=hyperopt_domain.workdir\n )\n\n for param in params:\n\n if param[\"type\"] == \"INTEGER\":\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n parameter_value = parameter_values_json[param[\"parameterName\"]]\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n parameter_value\n ]\n\n elif param[\"type\"] == \"DISCRETE\":\n feasible_points_string = param[\"feasiblePoints\"]\n feasible_points = [\n float(value.strip())\n for value in feasible_points_string.split(\",\")\n ]\n parameter_value = parameter_values_json[param[\"parameterName\"]]\n index_of_value_in_list = feasible_points.index(parameter_value)\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n index_of_value_in_list\n ]\n\n elif param[\"type\"] == \"CATEGORICAL\":\n feasible_points_string = param[\"feasiblePoints\"]\n feasible_points = [\n value.strip() for value in feasible_points_string.split(\",\")\n ]\n # Example: \"ftrl\"\n parameter_value = 
parameter_values_json[param[\"parameterName\"]]\n index_of_value_in_list = feasible_points.index(parameter_value)\n hyperopt_trial_miscs_idxs[param[\"parameterName\"]] = [index]\n hyperopt_trial_miscs_vals[param[\"parameterName\"]] = [\n index_of_value_in_list\n ]\n\n hyperopt_trial_specs.append(None)\n\n hyperopt_trial_misc[\"idxs\"] = hyperopt_trial_miscs_idxs\n hyperopt_trial_misc[\"vals\"] = hyperopt_trial_miscs_vals\n hyperopt_trial_miscs.append(hyperopt_trial_misc)\n\n # TODO: Use negative objective value for loss or not\n\n loss_for_hyperopt = advisor_trial.objective_value\n if study_configuration_json[\"goal\"] == \"MAXIMIZE\":\n # Now hyperopt only supports fmin and we need to reverse objective value for maximization\n loss_for_hyperopt = -1 * advisor_trial.objective_value\n\n hyperopt_trial_result = {\n \"loss\": loss_for_hyperopt,\n \"status\": hyperopt.STATUS_OK,\n }\n hyperopt_trial_results.append(hyperopt_trial_result)\n\n if len(completed_advisor_trials) > 0:\n # Example: {'refresh_time': datetime.datetime(2018, 9, 18, 12, 6, 41, 922000), 'book_time': datetime.datetime(2018, 9, 18, 12, 6, 41, 922000), 'misc': {'tid': 0, 'idxs': {'x2': [0], 'x': [0]}, 'cmd': ('domain_attachment', 'FMinIter_Domain'), 'vals': {'x2': [-8.137088361136204], 'x': [-4.849028446711832]}, 'workdir': None}, 'state': 2, 'tid': 0, 'exp_key': None, 'version': 0, 'result': {'status': 'ok', 'loss': 14.849028446711833}, 'owner': None, 'spec': None}\n hyperopt_trials = completed_hyperopt_trials.new_trial_docs(\n hyperopt_trial_new_ids,\n hyperopt_trial_specs,\n hyperopt_trial_results,\n hyperopt_trial_miscs,\n )\n for current_hyperopt_trials in hyperopt_trials:\n current_hyperopt_trials[\"state\"] = hyperopt.JOB_STATE_DONE\n\n completed_hyperopt_trials.insert_trial_docs(hyperopt_trials)\n completed_hyperopt_trials.refresh()\n\n rval = hyperopt.FMinIter(\n self.hyperopt_algorithm,\n hyperopt_domain,\n completed_hyperopt_trials,\n max_evals=-1,\n rstate=hyperopt_rstate,\n verbose=0,\n )\n rval.catch_eval_exceptions = False\n\n new_ids = rval.trials.new_trial_ids(number)\n\n rval.trials.refresh()\n\n random_state = rval.rstate.randint(2 ** 31 - 1)\n new_trials = self.hyperopt_algorithm(\n new_ids, rval.domain, completed_hyperopt_trials, random_state\n )\n rval.trials.refresh()\n\n # Construct return advisor trials from new hyperopt trials\n return_trial_list = []\n\n for i in range(number):\n\n # Example: {u'hidden2': [2], u'learning_rate': [0.04633366105812467], u'l1_normalization': [0.16858448611765364], u'optimizer': [3]}\n vals = new_trials[i][\"misc\"][\"vals\"]\n\n new_advisor_trial = Trial.create(study.name, \"TpeTrial\")\n parameter_values_json = {}\n\n for param in params:\n\n if param[\"type\"] == \"INTEGER\":\n pass\n\n elif param[\"type\"] == \"DOUBLE\":\n suggest_value = vals[param[\"parameterName\"]][0]\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n elif param[\"type\"] == \"DISCRETE\":\n feasible_point_list = [\n float(value.strip())\n for value in param[\"feasiblePoints\"].split(\",\")\n ]\n suggest_index = vals[param[\"parameterName\"]][0]\n suggest_value = feasible_point_list[suggest_index]\n\n elif param[\"type\"] == \"CATEGORICAL\":\n feasible_point_list = [\n value.strip() for value in param[\"feasiblePoints\"].split(\",\")\n ]\n suggest_index = vals[param[\"parameterName\"]][0]\n suggest_value = feasible_point_list[suggest_index]\n\n parameter_values_json[param[\"parameterName\"]] = suggest_value\n\n new_advisor_trial.parameter_values = 
json.dumps(parameter_values_json)\n # new_advisor_trial.save()\n return_trial_list.append(new_advisor_trial)\n\n return return_trial_list"
] |
[
"0.591799",
"0.5714507",
"0.56155163",
"0.56040365",
"0.54333824",
"0.5401873",
"0.53382957",
"0.53382957",
"0.51623964",
"0.51542753",
"0.51444685",
"0.51438236",
"0.5132472",
"0.5115702",
"0.5096597",
"0.5092274",
"0.5077159",
"0.50753367",
"0.5072571",
"0.50602084",
"0.50583476",
"0.5057152",
"0.50462675",
"0.5036646",
"0.5014067",
"0.5003216",
"0.49865633",
"0.49726176",
"0.49562243",
"0.4944131"
] |
0.717844
|
0
|
Compute the impact of a hazard on exposures.
|
def calc_mortality(impact, key, exposures, impact_funcs, hazard, kanton, save_mat=False):
# 1. Assign centroids to each exposure if not done
assign_haz = INDICATOR_CENTR + hazard.tag.haz_type
if assign_haz not in exposures:
exposures.assign_centroids(hazard)
else:
LOGGER.info('Exposures matching centroids found in %s', assign_haz)
# 2. Initialize values
impact.unit = exposures.value_unit
impact.event_id = hazard.event_id
impact.event_name = hazard.event_name
impact.date = hazard.date
impact.coord_exp = np.stack([exposures.latitude.values,
exposures.longitude.values], axis=1)
impact.frequency = hazard.frequency
impact.at_event = np.zeros(hazard.intensity.shape[0])
impact.eai_exp = np.zeros(exposures.value.size)
impact.tag = {'exp': exposures.tag, 'if_set': impact_funcs.tag,
'haz': hazard.tag}
impact.crs = exposures.crs
# Select exposures with positive value and assigned centroid
exp_idx = np.where(np.logical_and(exposures.value > 0, \
exposures[assign_haz] >= 0))[0]
if exp_idx.size == 0:
LOGGER.warning("No affected exposures.")
num_events = hazard.intensity.shape[0]
LOGGER.info('Calculating damage for %s assets (>0) and %s events.',
exp_idx.size, num_events)
# Get damage functions for this hazard
if_haz = INDICATOR_IF + hazard.tag.haz_type
haz_imp = impact_funcs.get_func(hazard.tag.haz_type)
if if_haz not in exposures and INDICATOR_IF not in exposures:
LOGGER.error('Missing exposures impact functions %s.', INDICATOR_IF)
raise ValueError
if if_haz not in exposures:
LOGGER.info('Missing exposures impact functions for hazard %s. ' + \
'Using impact functions in %s.', if_haz, INDICATOR_IF)
if_haz = INDICATOR_IF
# Check if deductible and cover should be applied
insure_flag = False
if ('deductible' in exposures) and ('cover' in exposures) \
and exposures.cover.max():
insure_flag = True
if save_mat:
impact.imp_mat = sparse.lil_matrix((impact.date.size, exposures.value.size))
# 3. Loop over exposures according to their impact function
tot_exp = 0
for imp_fun in haz_imp:
# get indices of all the exposures with this impact function
exp_iimp = np.where(exposures[if_haz].values[exp_idx] == imp_fun.id)[0]
tot_exp += exp_iimp.size
exp_step = int(CONFIG['global']['max_matrix_size'] / num_events)
if not exp_step:
LOGGER.error('Increase max_matrix_size configuration parameter'
' to > %s', str(num_events))
raise ValueError
        # separate in chunks
chk = -1
for chk in range(int(exp_iimp.size / exp_step)):
exp_impact_mortality(impact, \
exp_idx[exp_iimp[chk * exp_step:(chk + 1) * exp_step]], \
exposures, key, hazard, imp_fun, insure_flag, kanton)
exp_impact_mortality(impact, exp_idx[exp_iimp[(chk + 1) * exp_step:]], \
exposures, key, hazard, imp_fun, insure_flag, kanton)
if not tot_exp:
LOGGER.warning('No impact functions match the exposures.')
impact.aai_agg = sum(impact.at_event * hazard.frequency)
if save_mat:
impact.imp_mat = impact.imp_mat.tocsr()
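The chunking above keeps each dense slice under CONFIG['global']['max_matrix_size'] cells: with num_events rows, at most exp_step exposure columns fit per chunk, and the trailing call after the loop picks up the remainder (the chk = -1 initialization covers the case where no full chunk fits). A worked example with illustrative numbers:

max_matrix_size = 1_000_000   # assumed CONFIG value, for illustration only
num_events = 2_500
exp_step = int(max_matrix_size / num_events)   # 400 exposure columns per chunk
n_exp = 1_000                                  # exposures sharing one impact function
chunks = [(k * exp_step, (k + 1) * exp_step) for k in range(n_exp // exp_step)]
remainder = (len(chunks) * exp_step, n_exp)    # handled by the call after the loop
print(chunks, remainder)   # [(0, 400), (400, 800)] (800, 1000)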
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def exp_impact_mortality(impact, exp_iimp, exposures, key, hazard, imp_fun, insure_flag, kanton):\r\n if not exp_iimp.size:\r\n return \r\n \r\n if kanton is None:\r\n kanton_name = 'CH'\r\n else:\r\n kanton_name = kanton\r\n \r\n directory = '../../input_data/impact_functions/'\r\n \r\n annual_deaths = pd.read_excel(''.join([directory, 'annual_deaths.xlsx']), sheet_name = key)\r\n # file containing the number of annual deaths per CH / Canton for each age category\r\n \r\n # PREPROCESSING STEP:\r\n \r\n # get assigned centroids\r\n icens = exposures[INDICATOR_CENTR + hazard.tag.haz_type].values[exp_iimp]\r\n # get affected intensities\r\n temperature_matrix = hazard.intensity[:, icens] # intensity of the hazard\r\n # get affected fractions\r\n fract = hazard.fraction[:, icens] # frequency of the hazard\r\n # get exposure values\r\n exposure_values = exposures.value.values[exp_iimp] \r\n\r\n expected_deaths = {}\r\n daily_deaths = annual_deaths[annual_deaths['Canton'] == kanton_name]['Annual_deaths'].values[0] / 365\r\n max_temp = temperature_matrix.max()\r\n for value in range(22, int(np.ceil(max_temp)) + 1):\r\n expected_deaths[value] = daily_deaths / imp_fun.calc_mdr(value)\r\n #print(expected_deaths)\r\n\r\n # Compute impact matrix\r\n matrix = impact_mortality(temperature_matrix, exposure_values, icens, expected_deaths, imp_fun, fract.shape)\r\n\r\n if insure_flag and matrix.nonzero()[0].size:\r\n inten_val = hazard.intensity[:, icens].todense()\r\n paa = np.interp(inten_val, imp_fun.intensity, imp_fun.paa)\r\n matrix = np.minimum(np.maximum(matrix - \\\r\n exposures.deductible.values[exp_iimp] * paa, 0), \\\r\n exposures.cover.values[exp_iimp])\r\n impact.eai_exp[exp_iimp] += np.sum(np.asarray(matrix) * \\\r\n hazard.frequency.reshape(-1, 1), axis=0)\r\n else:\r\n impact.eai_exp[exp_iimp] += np.squeeze(np.asarray(np.sum( \\\r\n matrix.multiply(hazard.frequency.reshape(-1, 1)), axis=0)))\r\n\r\n impact.at_event += np.squeeze(np.asarray(np.sum(matrix, axis=1)))\r\n impact.tot_value += np.sum(exposures.value.values[exp_iimp])\r\n if not isinstance(impact.imp_mat, list):\r\n impact.imp_mat[:, exp_iimp] = matrix",
"def deposited_exposure_between_bounds(self, time1: float, time2: float) -> _VectorisedFloat:\n deposited_exposure: _VectorisedFloat = 0.\n for interaction in self.short_range:\n start, stop = interaction.extract_between_bounds(time1, time2)\n short_range_jet_exposure = interaction._normed_jet_exposure_between_bounds(\n self.concentration_model, start, stop)\n short_range_lr_exposure = interaction._normed_interpolated_longrange_exposure_between_bounds(\n self.concentration_model, start, stop)\n dilution = interaction.dilution_factor()\n\n fdep = interaction.expiration.particle.fraction_deposited(evaporation_factor=1.0)\n diameter = interaction.expiration.particle.diameter\n \n # Aerosols not considered given the formula for the initial\n # concentration at mouth/nose.\n if diameter is not None and not np.isscalar(diameter):\n # We compute first the mean of all diameter-dependent quantities\n # to perform properly the Monte-Carlo integration over\n # particle diameters (doing things in another order would\n # lead to wrong results for the probability of infection).\n this_deposited_exposure = (np.array(short_range_jet_exposure\n * fdep).mean()\n - np.array(short_range_lr_exposure * fdep).mean()\n * self.concentration_model.infected.activity.exhalation_rate)\n else:\n # In the case of a single diameter or no diameter defined,\n # one should not take any mean at this stage.\n this_deposited_exposure = (short_range_jet_exposure * fdep\n - short_range_lr_exposure * fdep\n * self.concentration_model.infected.activity.exhalation_rate)\n\n # Multiply by the (diameter-independent) inhalation rate\n deposited_exposure += (this_deposited_exposure *\n interaction.activity.inhalation_rate\n /dilution)\n\n # Then we multiply by diameter-independent quantities: viral load\n # and fraction of infected virions\n f_inf = self.concentration_model.infected.fraction_of_infectious_virus()\n deposited_exposure *= (f_inf\n * self.concentration_model.virus.viral_load_in_sputum\n * (1 - self.exposed.mask.inhale_efficiency()))\n # Long-range concentration\n deposited_exposure += self.long_range_deposited_exposure_between_bounds(time1, time2)\n\n return deposited_exposure",
"def _impact_detect(phase, start_move, end_move, grf, acc_hip_z, acc_hip_x, acc_hip):\n min_air_time = ct.min_air_time\n imp_len = ct.imp_len # smallest impact window\n for i, j in zip(start_move, end_move):\n if j - i < min_air_time:\n phase[i:j] = 0\n else:\n grf_sub = grf[i:j]\n ranges, lengths = get_ranges(grf_sub, 1, True)\n for imp, length in zip(ranges, lengths):\n imp += i\n if (imp[0] != i and # can't impact from start\n # length >= imp_len and # make sure impact is of enough length\n phase[imp[0] - 1] == PhaseId.air.value): # has to be in air right before impact\n if imp[1] == len(phase):\n imp[1] -= 1\n if imp[1] - imp[0] < 50:\n acc_hip_z_step = acc_hip_z[imp[0]:imp[1]]\n acc_hip_x_step = acc_hip_x[imp[0]:imp[1]]\n _update_impact_start_jog(imp, acc_hip_z_step, acc_hip_x_step)\n acc_hip_step = acc_hip[imp[0]:imp[1] + 5]\n _update_impact_end_jog(imp, acc_hip_step)\n if imp[1] - imp[0] > imp_len:\n phase[imp[0]: imp[1]] = PhaseId.impact.value\n\n _detect_takeoff(phase)",
"def _calc_indirect_effect(x, y, m):\n x = stats.zscore(x)\n y = stats.zscore(y)\n m = stats.zscore(m)\n direct_effect = sm.OLS(y, sm.add_constant(x)).fit().params[1]\n xs = np.stack((x, m), axis=1)\n remaining_effect = sm.OLS(y, sm.add_constant(xs)).fit().params[1]\n indirect_effect = direct_effect - remaining_effect\n proportion_mediated = 1 - remaining_effect/direct_effect\n return indirect_effect, proportion_mediated",
"def impact_update(self, impact_surfaces, impact_state): \n # Set MVI state 2 to impact state.\n self._s2 = (impact_state['t'], impact_state['q'], impact_state['p'])\n self._tau1 = TAU(self)\n\n # Apply the discrete impact map.\n dis = discrete_plastic_impact(self, impact_surfaces)\n \n # Add constraints to the system.\n add_constraints(self, impact_surfaces)",
"def re_estimate_emission(self, x):\n with tf.name_scope('update_emissions'):\n u_x = tf.multiply(\n tf.math.exp(\n self.fb_array), tf.expand_dims(\n x, 1)) # pg 73: uj(t)*x(t)\n\n # Calculate means\n emission_score_log = tf.math.log(tf.math.reduce_sum(u_x, 0))\n denom = tf.math.reduce_logsumexp(self.fb_array, 0)\n means_log = emission_score_log - denom\n means = tf.math.exp(means_log)\n\n # Calculate standard deviations\n # TODO(kmcmanus): vectorize more\n new_stds = []\n for i in range(self.S):\n # (x_j - new_mean_state_i)**2\n variance_array = (x - means[i])**2\n # (prob_in_state_i_at_obj_j) * (x_j - new_mean_state_i)**2\n variance_array_x = tf.multiply(tf.math.exp(\n self.fb_array[:, i]), variance_array) # not logs\n # sum the above\n variance_score = tf.math.reduce_sum(variance_array_x, 0)\n new_var = variance_score / tf.math.exp(denom[i])\n new_std = tf.math.sqrt(new_var)\n new_stds.append(new_std)\n\n new_emissions = tfp.distributions.Normal(loc=means, scale=new_stds)\n\n return new_emissions",
"def calculate_advantage(stage_0, stage_1):\n # Improvement in hp difference is good.\n hp_pct_0 = (float(stage_0.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_0.enemy_life)/MAX_ENEMY_LIFE)\n hp_pct_1 = (float(stage_1.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_1.enemy_life)/MAX_ENEMY_LIFE)\n return hp_pct_1 - hp_pct_0",
"def cumulative_hazard(self, t, t_section):\n λ = torch.exp(self.logλ)\n\n # cumulative hazard\n cum_hazard = λ * self.widths\n cum_hazard = cum_hazard.cumsum(0)\n cum_hazard = self.prepend_zero(cum_hazard)\n cum_hazard_sec = cum_hazard[t_section]\n\n δ_t = t - self.breakpoints[t_section]\n\n return cum_hazard_sec + λ[t_section] * δ_t",
"def __calcToonImpact(self, trajectory):\n\n assert self.notify.debug('__calcToonImpact')\n\n # calculate when the toon will hit the ground\n # (assume absolute lowest point of terrain is above GROUND_PLANE_MIN)\n t_groundImpact = trajectory.checkCollisionWithGround(GROUND_PLANE_MIN)\n if t_groundImpact >= trajectory.getStartTime():\n # toon will hit the ground (...he'd best...)\n return t_groundImpact, self.HIT_GROUND\n else:\n # toon won't hit the ground??\n self.notify.error(\"__calcToonImpact: toon never impacts ground?\")\n # return something\n return 0.0, self.HIT_GROUND",
"def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')",
"def hazard(cls, *args):\n return cls.path_finder('hazard', *args)",
"def hazard(cls, *args):\n return cls.path_finder('hazard', *args)",
"def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res",
"def damageSubtractor(self, damage, target, caller):\n # Build the target av objects\n target_shield_value = target.db.shield_value # Applied conditionally\n target_armor = target.db.armor\n target_tough = target.db.tough\n target_armor_specialist = target.db.armor_specialist\n\n # Apply damage in order\n if target_shield_value:\n # Get value of shield damage to check if it's under 0. Need to pass\n # this on to armor\n shield_damage = target_shield_value - damage\n if shield_damage < 0:\n # Check if damage would make shield go below 0\n damage = abs(shield_damage)\n # Set shield_value to 0\n target.db.shield_value = 0\n # Recalc and set av with new shield value\n else:\n target.db.shield_value = shield_damage\n damage = 0\n\n if target_armor_specialist and damage:\n # Get value of damage\n armor_specialist_damage = target_armor_specialist - damage\n if armor_specialist_damage < 0:\n damage = abs(armor_specialist_damage)\n target.db.armor_specialist = 0\n else:\n target.db.armor_specialist = armor_specialist_damage\n damage = 0\n\n if target_armor and damage:\n # Get value of damage\n armor_damage = target_armor - damage\n if armor_damage < 0:\n damage = abs(armor_damage)\n target.db.armor = 0\n else:\n target.db.armor = armor_damage\n damage = 0\n\n if target_tough and damage:\n tough_damage = target_tough - damage\n if tough_damage < 0:\n damage = abs(tough_damage)\n target.db.tough = 0\n else:\n target.db.tough = tough_damage\n damage = 0\n else:\n self.deathSubtractor(damage, target, caller)\n\n new_av = self.updateArmorValue(target.db.shield_value, target.db.armor, target.db.tough, target.db.armor_specialist)\n\n return new_av",
"def _laststate_update(self, exo_dataframe, swarm_exposure, costs=None):\n if self._swarm_series is None or len(self._swarm_series) <= 1:\n raise ValueError(\"Improperly initiated error: self._swarm_series is None \"\n \"or len(self._equity) <= 1 \")\n\n if len(swarm_exposure) == 0:\n warnings.warn(\"Swarm ({0}) exposure is zero-length, seems that no members picked after rebalancing.\".format(self.name))\n\n # 1. Filter exo_price and swarm_exposure >= self.last_date\n _exo_price_array = exo_dataframe['exo'][exo_dataframe.index >= self.last_date]\n _exo_delta_array = None\n if 'delta' in exo_dataframe:\n _exo_delta_array = exo_dataframe['delta'][exo_dataframe.index >= self.last_date]\n _swarm_exposure = swarm_exposure[swarm_exposure.index >= self.last_date]\n\n if len(_swarm_exposure) > 0 and len(_exo_price_array) != len(_swarm_exposure):\n raise ValueError(\"len(_swarm_exposure) > 0 and len(_exo_price_array) != len(_swarm_exposure)\")\n\n\n for i in range(len(_exo_price_array)):\n # Do sanity checks\n # Check that date index matches\n _costs_value = 0.0\n delta_value = 0.0\n _exposure = 0.0\n\n if len(_swarm_exposure) > 0:\n if _exo_price_array.index[i] != _swarm_exposure.index[i]:\n raise ValueError(\"_exo_price_array.index[i] != _swarm_exposure.index[i]\")\n _exposure = _swarm_exposure.values[i]\n else:\n if self._last_date == _exo_price_array.index[i]:\n _exposure = self.last_prev_exposure\n\n # We have new quote data\n # Similar to backtester_fast.stats_exposure() backtesting algorithm\n if i == 0:\n # usually PnL for 1-st day will be 0.0, but in case when EXO price recalculated we need to do adjustments\n profit = (_exo_price_array.values[i] - self.last_exoquote) * self._last_prev_exposure\n\n # Don't calculate costs at first day (it's assumed that costs already included in price)\n if self._last_date != _exo_price_array.index[i]:\n if costs is not None:\n _costs_value = calc_costs(costs['transaction_costs'].values[i],\n costs['rollover_costs'].values[i],\n self._last_prev_exposure, # Prev Exposure\n _exposure) # Current Exposure\n profit += _costs_value\n else:\n profit = (_exo_price_array.values[i] - _exo_price_array.values[i-1]) * self._last_exposure\n if costs is not None:\n _costs_value = calc_costs(costs['transaction_costs'].values[i],\n costs['rollover_costs'].values[i],\n self.last_exposure, # Prev Exposure\n _exposure) # Current Exposure\n profit += _costs_value\n\n # Updating swarm delta value if it exists in EXO dataframe\n\n if _exo_delta_array is not None:\n delta_value = _exo_delta_array.values[i] * _exposure\n\n\n # Use previous exposure to calculate quotes\n self._swarm_series.at[_exo_price_array.index[i], 'equity'] = self._swarm_series['equity'].values[-1] + profit\n self._swarm_series.at[_exo_price_array.index[i], 'delta'] = delta_value\n self._swarm_series.at[_exo_price_array.index[i], 'exposure'] = _exposure\n self._swarm_series.at[_exo_price_array.index[i], 'costs'] = _costs_value\n\n # Update self.last_* properties for next loop step\n if self._last_date != _exo_price_array.index[i]:\n #\n # Suppress last_exposure overwriting if we recalculating swarm on current date\n #\n self._last_prev_exposure = self._last_exposure\n self._last_exposure = _exposure\n self._last_exoquote = _exo_price_array.values[i]\n self._last_date = _exo_price_array.index[i]\n self._last_delta = delta_value",
"def get_market_impact(self, turnovers):\n self.market_impact = pd.concat(map(lambda to: self.analysis.market_impact(turnover=to, side=\"two\")/self.analysis.mid_price, turnovers), \n keys=turnovers, axis=1\n )\n return self.market_impact",
"def hpm_loss(self, x, y, t, Ex_u, Ex_v):\n\n x = x.view(-1)\n y = y.view(-1)\n t = t.view(-1)\n\n u, v, f_u, f_v = self.net_pde(x, y, t)\n\n Ex_u = Ex_u.view(-1)\n Ex_v = Ex_v.view(-1)\n\n hpmLoss = torch.mean(f_u ** 2) + torch.mean(f_v ** 2) + torch.mean((u - Ex_u) ** 2) + torch.mean((v - Ex_v) ** 2) \n return hpmLoss",
"def computeIntercepts():\n pass",
"def applyEffect(self, user, target, environment):\n pkmn = self.getEffectedPokemon(user, target)\n self.affectPkmn(pkmn)",
"def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum",
"def Insurance(Md,X):\n if VERSION == 16:\n utemp = 0.0*X[iu]+1.0*Md.ubar\n elif VERSION == 31:\n utemp = 1.25*(X[iu]-Md.ubar)+1.0*Md.ubar\n else:\n utemp = X[iu]\n\n Mom = Md.IP.get_Moments(utemp,Md.ubar,Md.tau)\n return beta*(-Mom[1]+Mom[3]/Mom[0])",
"def Incentives(Md,X,EEnvPrime):\n u = X[iu]\n b = Md.b()\n\n\n\n CplusG = 1./X[iMU] * (1+SSGRatio*X[ieG])\n\n SD = SkillDeriv(Md,X)\n SkillRisk = beta * ( EEnvPrime[iEnv_EAlph] *(1-delta)*X[iEAlph]*SD[0]\n + EEnvPrime[iEnv_ElogAlpha]*(1-delta)*SD[1])\n\n dWdu = (log(b) + (1-b)/(1-u+u*b) - (1+chi)*X[iA]*X[ih]/CplusG/X[iSA]\n + (1+chi)*psi_1*X[iM]**psi_2/CplusG+gamma_0*X[ih]**(1+gamma)/(1+gamma)-psy + SkillRisk)\n dqdb_M = (1./kappa)*X[iq] * X[idVndb] / X[iVn]\n dudq_M = -X[iM]*(X[iulag]+upsilon*(1-X[iulag]))\n dudb_M = dudq_M * dqdb_M\n\n dWdh = ( (1+chi)*X[iA]/CplusG/X[iSA] - gamma_0 * X[ih]**gamma ) * (1-u)\n\n # dhdq = -zeta_2/(1+gamma)*X[ih]/(1-X[iu])*dudq_M\n # dhdubar = zeta_2/(1+gamma)*X[ih]/(1-Md.ubar)\n # dhdb = dhdq/kappa*X[iq]/X[iVn]*X[idVndb] + dhdubar * Md.dubardb\n\n dhdu = -zeta_2/(1+gamma)*X[ih]/(1-X[iu])\n\n\n XSS = Md.XSS\n dhdubar = zeta_2/(1+gamma)*X[ih]/(1-XSS[iu])\n dubardq_M = -XSS[iM]*(XSS[iulag]+upsilon*(1-XSS[iulag]))\n dqbardb_M = (1./kappa)*XSS[iq] * XSS[idVndb] / XSS[iVn]\n\n\n dhdb = dhdu*dudq_M*dqdb_M + dhdubar * dubardq_M*dqbardb_M\n\n\n return dWdu * dudb_M - (X[iulag]+upsilon*(1-X[iulag]))*X[iM]*X[iVn]*dqdb_M + dWdh * dhdb + beta*dudb_M*EEnvPrime[iEnv_ulag]",
"def impress(self):\n raise NotImplementedError",
"def get_oracle_action(self, obs) -> np.ndarray:\n cam_u = obs['achieved_goal'][0] * RENDER_WIDTH\n cam_v = obs['achieved_goal'][1] * RENDER_HEIGHT\n self.ecm.homo_delta = np.array([cam_u, cam_v]).reshape((2, 1))\n if np.linalg.norm(self.ecm.homo_delta) < 1 and np.linalg.norm(self.ecm.wz) < 0.1:\n # e difference is small enough\n action = np.zeros(3)\n else:\n print(\"Pixel error: {:.4f}\".format(np.linalg.norm(self.ecm.homo_delta)))\n # controller\n fov = np.deg2rad(FoV)\n fx = (RENDER_WIDTH / 2) / np.tan(fov / 2)\n fy = (RENDER_HEIGHT / 2) / np.tan(fov / 2) # TODO: not sure\n cz = 1.0\n Lmatrix = np.array([[-fx / cz, 0., cam_u / cz],\n [0., -fy / cz, cam_v / cz]])\n action = 0.5 * np.dot(np.linalg.pinv(Lmatrix), self.ecm.homo_delta).flatten() / 0.01\n if np.abs(action).max() > 1:\n action /= np.abs(action).max()\n action *= 0.8\n return action",
"def CalcularImpacto(request, id_item):\n item = Item.objects.get(id=id_item)\n\n item.complejidadtotal = impacto_complejidad(id_item)\n item.costototal = impacto_costo(id_item)\n item.save()\n messages.info(request, \"Se calculo corresctamente el impacto de modificacion del itrm %s .\" % item)\n return HttpResponseRedirect('/admin/todo/item')",
"def eff(self):\n return self._eff",
"def eff(self):\n return self._eff",
"def add_impact(r, view_o, impact):\r\n for v in view_o.get(r,[]):\r\n impact.add(v)\r\n add_impact(v, view_o, impact)",
"def get_hit(self):\n for bossProjectile in self.overlapping_sprites:\n self.score.value -= 10\n self.score.right = games.screen.width - 10 \n bossProjectile.handle_caught()",
"def damages(self, gross_output, temp_atmosphere, abatement=None):\n a1 = self.damages_terms[0]\n a2 = self.damages_terms[1]\n a3 = self.damages_terms[2]\n return ne.evaluate('gross_output * (1 - 1 / (1 + a1 * temp_atmosphere + a2 * temp_atmosphere ** a3))')"
] |
[
"0.6713164",
"0.5624437",
"0.5555471",
"0.5456832",
"0.5363461",
"0.53354156",
"0.53214157",
"0.5289515",
"0.528789",
"0.52875054",
"0.52398354",
"0.52398354",
"0.5215647",
"0.5214677",
"0.5208311",
"0.52081144",
"0.52079606",
"0.51916474",
"0.5190379",
"0.51640564",
"0.5161768",
"0.5151063",
"0.5111389",
"0.5109165",
"0.5103665",
"0.50920224",
"0.50920224",
"0.5028744",
"0.5023328",
"0.50211275"
] |
0.69947916
|
0
|
Compute impact for input exposure indexes and impact function.
|
def exp_impact_mortality(impact, exp_iimp, exposures, key, hazard, imp_fun, insure_flag, kanton):
    if not exp_iimp.size:
        return
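    # Default to the national level ('CH') when no canton is specified.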
    if kanton is None:
        kanton_name = 'CH'
    else:
        kanton_name = kanton
    directory = '../../input_data/impact_functions/'
    annual_deaths = pd.read_excel(''.join([directory, 'annual_deaths.xlsx']), sheet_name=key)
    # file containing the number of annual deaths per CH / Canton for each age category
    # PREPROCESSING STEP:
    # get assigned centroids
    icens = exposures[INDICATOR_CENTR + hazard.tag.haz_type].values[exp_iimp]
    # get affected intensities
    temperature_matrix = hazard.intensity[:, icens]  # intensity of the hazard
    # get affected fractions
    fract = hazard.fraction[:, icens]  # fraction of each centroid affected by the hazard
    # get exposure values
    exposure_values = exposures.value.values[exp_iimp]
    expected_deaths = {}
    daily_deaths = annual_deaths[annual_deaths['Canton'] == kanton_name]['Annual_deaths'].values[0] / 365
    max_temp = temperature_matrix.max()
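    # Build a lookup of expected deaths for each integer temperature from 22 degrees C up
    # to the observed maximum, dividing the daily death count by the impact function's
    # mortality-damage ratio (calc_mdr).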
    for value in range(22, int(np.ceil(max_temp)) + 1):
        expected_deaths[value] = daily_deaths / imp_fun.calc_mdr(value)
    # Compute impact matrix
    matrix = impact_mortality(temperature_matrix, exposure_values, icens, expected_deaths, imp_fun, fract.shape)
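    # When the exposures carry deductible and cover columns, clip the impact matrix
    # accordingly before accumulating the expected annual impact per exposure point.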
    if insure_flag and matrix.nonzero()[0].size:
        inten_val = hazard.intensity[:, icens].todense()
        paa = np.interp(inten_val, imp_fun.intensity, imp_fun.paa)
        matrix = np.minimum(np.maximum(matrix -
                                       exposures.deductible.values[exp_iimp] * paa, 0),
                            exposures.cover.values[exp_iimp])
        impact.eai_exp[exp_iimp] += np.sum(np.asarray(matrix) *
                                           hazard.frequency.reshape(-1, 1), axis=0)
    else:
        impact.eai_exp[exp_iimp] += np.squeeze(np.asarray(np.sum(
            matrix.multiply(hazard.frequency.reshape(-1, 1)), axis=0)))
    impact.at_event += np.squeeze(np.asarray(np.sum(matrix, axis=1)))
    impact.tot_value += np.sum(exposures.value.values[exp_iimp])
    if not isinstance(impact.imp_mat, list):
        impact.imp_mat[:, exp_iimp] = matrix
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calc_mortality(impact, key, exposures, impact_funcs, hazard, kanton, save_mat=False):\r\n # 1. Assign centroids to each exposure if not done\r\n assign_haz = INDICATOR_CENTR + hazard.tag.haz_type\r\n if assign_haz not in exposures:\r\n exposures.assign_centroids(hazard)\r\n else:\r\n LOGGER.info('Exposures matching centroids found in %s', assign_haz)\r\n\r\n # 2. Initialize values\r\n impact.unit = exposures.value_unit\r\n impact.event_id = hazard.event_id\r\n impact.event_name = hazard.event_name\r\n impact.date = hazard.date\r\n impact.coord_exp = np.stack([exposures.latitude.values,\r\n exposures.longitude.values], axis=1)\r\n impact.frequency = hazard.frequency\r\n impact.at_event = np.zeros(hazard.intensity.shape[0])\r\n impact.eai_exp = np.zeros(exposures.value.size)\r\n impact.tag = {'exp': exposures.tag, 'if_set': impact_funcs.tag,\r\n 'haz': hazard.tag}\r\n impact.crs = exposures.crs\r\n\r\n # Select exposures with positive value and assigned centroid\r\n exp_idx = np.where(np.logical_and(exposures.value > 0, \\\r\n exposures[assign_haz] >= 0))[0]\r\n if exp_idx.size == 0:\r\n LOGGER.warning(\"No affected exposures.\")\r\n\r\n num_events = hazard.intensity.shape[0]\r\n LOGGER.info('Calculating damage for %s assets (>0) and %s events.',\r\n exp_idx.size, num_events)\r\n\r\n # Get damage functions for this hazard\r\n if_haz = INDICATOR_IF + hazard.tag.haz_type\r\n haz_imp = impact_funcs.get_func(hazard.tag.haz_type)\r\n if if_haz not in exposures and INDICATOR_IF not in exposures:\r\n LOGGER.error('Missing exposures impact functions %s.', INDICATOR_IF)\r\n raise ValueError\r\n if if_haz not in exposures:\r\n LOGGER.info('Missing exposures impact functions for hazard %s. ' + \\\r\n 'Using impact functions in %s.', if_haz, INDICATOR_IF)\r\n if_haz = INDICATOR_IF\r\n\r\n # Check if deductible and cover should be applied\r\n insure_flag = False\r\n if ('deductible' in exposures) and ('cover' in exposures) \\\r\n and exposures.cover.max():\r\n insure_flag = True\r\n\r\n if save_mat:\r\n impact.imp_mat = sparse.lil_matrix((impact.date.size, exposures.value.size))\r\n\r\n # 3. Loop over exposures according to their impact function\r\n tot_exp = 0\r\n for imp_fun in haz_imp:\r\n # get indices of all the exposures with this impact function\r\n exp_iimp = np.where(exposures[if_haz].values[exp_idx] == imp_fun.id)[0]\r\n tot_exp += exp_iimp.size\r\n exp_step = int(CONFIG['global']['max_matrix_size'] / num_events)\r\n if not exp_step:\r\n LOGGER.error('Increase max_matrix_size configuration parameter'\r\n ' to > %s', str(num_events))\r\n raise ValueError\r\n # separte in chunks\r\n chk = -1\r\n for chk in range(int(exp_iimp.size / exp_step)):\r\n exp_impact_mortality(impact, \\\r\n exp_idx[exp_iimp[chk * exp_step:(chk + 1) * exp_step]], \\\r\n exposures, key, hazard, imp_fun, insure_flag, kanton)\r\n exp_impact_mortality(impact, exp_idx[exp_iimp[(chk + 1) * exp_step:]], \\\r\n exposures, key, hazard, imp_fun, insure_flag, kanton)\r\n\r\n if not tot_exp:\r\n LOGGER.warning('No impact functions match the exposures.')\r\n impact.aai_agg = sum(impact.at_event * hazard.frequency)\r\n\r\n if save_mat:\r\n impact.imp_mat = impact.imp_mat.tocsr()",
"def calculate_exposure(self, indices):\n total = 0.0\n for index in indices:\n try:\n total += float(self.info[str(index)])\n except KeyError:\n return -1\n return total",
"def impact_update(self, impact_surfaces, impact_state): \n # Set MVI state 2 to impact state.\n self._s2 = (impact_state['t'], impact_state['q'], impact_state['p'])\n self._tau1 = TAU(self)\n\n # Apply the discrete impact map.\n dis = discrete_plastic_impact(self, impact_surfaces)\n \n # Add constraints to the system.\n add_constraints(self, impact_surfaces)",
"def calculate_impact(layers, impact_fcn,\n comment=''):\n\n # Input checks\n check_data_integrity(layers)\n\n # Get an instance of the passed impact_fcn\n impact_function = impact_fcn()\n\n # Pass input layers to plugin\n F = impact_function.run(layers)\n\n msg = 'Impact function %s returned None' % str(impact_function)\n verify(F is not None, msg)\n\n # Write result and return filename\n if F.is_raster:\n extension = '.tif'\n # use default style for raster\n else:\n extension = '.shp'\n # use default style for vector\n\n output_filename = unique_filename(suffix=extension)\n F.filename = output_filename\n F.write_to_file(output_filename)\n\n # Establish default name (layer1 X layer1 x impact_function)\n if not F.get_name():\n default_name = ''\n for layer in layers:\n default_name += layer.name + ' X '\n\n if hasattr(impact_function, 'plugin_name'):\n default_name += impact_function.plugin_name\n else:\n # Strip trailing 'X'\n default_name = default_name[:-2]\n\n F.set_name(default_name)\n\n # FIXME (Ole): If we need to save style as defined by the impact_function\n # this is the place\n\n # Return layer object\n return F",
"def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w",
"def __call__(self):\n\n if self.indices is []:\n return\n\n\n #-----------------------------------------\n # Here is where the important formula is applied\n #----------------------------------------\n if self.indices is None:\n height = self.stage_c - self.elev_c\n self.friction_c[:] = self.friction(height)\n else:\n ind = self.indices\n height = self.stage_c[ind] - self.elev_c[ind]\n self.friction_c[ind] = self.friction(height)",
"def computeIntercepts():\n pass",
"def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def compute(self, offsets):\n # De-scale the offsets\n offsets = offsets.copy() / self.scale\n \n energy = 0.\n for (fieldU, fieldL), t in self.terms.iteritems():\n iU = t['upper_index']\n iL = t['lower_index']\n levelDiff = t['weight'] * (t['delta'] - offsets[iU] + offsets[iL])\n energy += levelDiff*levelDiff\n if energy < self.bestEnergy:\n self.bestEnergy = energy\n self.bestOffsets = offsets.copy()\n self.ncalls += 1\n # Return the energy\n return energy",
"def compute(self, inp, output=None, do_exc=True, do_vxc=True, do_fxc=False, do_kxc=False, do_lxc=False):\n\n # Check flags\n if not self._have_exc and do_exc:\n raise ValueError(\"Functional '%s' does not have EXC capabilities.\" % self.get_name())\n if not self._have_vxc and do_vxc:\n raise ValueError(\"Functional '%s' does not have VXC capabilities built in.\" % self.get_name())\n if not self._have_fxc and do_fxc:\n raise ValueError(\"Functional '%s' does not have FXC capabilities built in.\" % self.get_name())\n if not self._have_kxc and do_kxc:\n raise ValueError(\"Functional '%s' does not have KXC capabilities built in.\" % self.get_name())\n if not self._have_lxc and do_lxc:\n raise ValueError(\"Functional '%s' does not have LXC capabilities built in.\" % self.get_name())\n\n # Parse input arrays\n if isinstance(inp, np.ndarray):\n inp = {\"rho\": np.asarray(inp, dtype=np.double)}\n elif isinstance(inp, dict):\n inp = {k: np.asarray(v, dtype=np.double) for k, v in inp.items()}\n else:\n raise KeyError(\"Input must have a 'rho' variable or a single array.\")\n\n # How long are we?\n npoints = int(inp[\"rho\"].size / self._spin)\n if (inp[\"rho\"].size % self._spin):\n raise ValueError(\"Rho input has an invalid shape, must be divisible by %d\" % self._spin)\n\n # Find the right compute function\n args = [self.xc_func, ctypes.c_size_t(npoints)]\n if self.get_family() in [flags.XC_FAMILY_LDA, flags.XC_FAMILY_HYB_LDA]:\n input_labels = [\"rho\"]\n input_num_args = 1\n\n output_labels = [\n \"zk\", # 1, 1\n \"vrho\", # 1, 2\n \"v2rho2\", # 1, 3\n \"v3rho3\", # 1, 4\n \"v4rho4\" # 1, 5\n ]\n\n # Build input args\n output = _check_arrays(output, output_labels[0:1],\n self.xc_func_sizes, npoints, do_exc)\n output = _check_arrays(output, output_labels[1:2],\n self.xc_func_sizes, npoints, do_vxc)\n output = _check_arrays(output, output_labels[2:3],\n self.xc_func_sizes, npoints, do_fxc)\n output = _check_arrays(output, output_labels[3:4],\n self.xc_func_sizes, npoints, do_kxc)\n output = _check_arrays(output, output_labels[4:5],\n self.xc_func_sizes, npoints, do_lxc)\n\n args.extend([ inp[x] for x in input_labels])\n args.extend([output[x] for x in output_labels])\n\n core.xc_lda(*args)\n\n elif self.get_family() in [flags.XC_FAMILY_GGA, flags.XC_FAMILY_HYB_GGA]:\n input_labels = [\"rho\", \"sigma\"]\n input_num_args = 2\n\n output_labels = [\n \"zk\", # 1, 1\n \"vrho\", \"vsigma\", # 2, 3\n \"v2rho2\", \"v2rhosigma\", \"v2sigma2\", # 3, 6\n \"v3rho3\", \"v3rho2sigma\", \"v3rhosigma2\", \"v3sigma3\", # 4, 10\n \"v4rho4\", \"v4rho3sigma\", \"v4rho2sigma2\", \"v4rhosigma3\", \"v4sigma4\" # 5, 15\n ]\n\n # Build input args\n output = _check_arrays(output, output_labels[0:1],\n self.xc_func_sizes, npoints, do_exc)\n output = _check_arrays(output, output_labels[1:3],\n self.xc_func_sizes, npoints, do_vxc)\n output = _check_arrays(output, output_labels[3:6],\n self.xc_func_sizes, npoints, do_fxc)\n output = _check_arrays(output, output_labels[6:10],\n self.xc_func_sizes, npoints, do_kxc)\n output = _check_arrays(output, output_labels[10:15],\n self.xc_func_sizes, npoints, do_lxc)\n\n args.extend([ inp[x] for x in input_labels])\n args.extend([output[x] for x in output_labels])\n\n core.xc_gga(*args)\n\n elif self.get_family() in [flags.XC_FAMILY_MGGA, flags.XC_FAMILY_HYB_MGGA]:\n # Build input args\n if self._needs_laplacian:\n input_labels = [\"rho\", \"sigma\", \"lapl\", \"tau\"]\n else:\n input_labels = [\"rho\", \"sigma\", \"tau\"]\n input_num_args = 4\n\n output_labels = [\n \"zk\", # 1, 1\n 
\"vrho\", \"vsigma\", \"vlapl\", \"vtau\", # 4, 5\n \"v2rho2\", \"v2rhosigma\", \"v2rholapl\", \"v2rhotau\", \"v2sigma2\", # 10, 15\n \"v2sigmalapl\", \"v2sigmatau\", \"v2lapl2\", \"v2lapltau\", \"v2tau2\",\n \"v3rho3\", \"v3rho2sigma\", \"v3rho2lapl\", \"v3rho2tau\", \"v3rhosigma2\", # 20, 35\n \"v3rhosigmalapl\", \"v3rhosigmatau\", \"v3rholapl2\", \"v3rholapltau\",\n \"v3rhotau2\", \"v3sigma3\", \"v3sigma2lapl\", \"v3sigma2tau\",\n \"v3sigmalapl2\", \"v3sigmalapltau\", \"v3sigmatau2\", \"v3lapl3\",\n \"v3lapl2tau\", \"v3lapltau2\", \"v3tau3\",\n \"v4rho4\", \"v4rho3sigma\", \"v4rho3lapl\", \"v4rho3tau\", \"v4rho2sigma2\", # 35, 70\n \"v4rho2sigmalapl\", \"v4rho2sigmatau\", \"v4rho2lapl2\", \"v4rho2lapltau\",\n \"v4rho2tau2\", \"v4rhosigma3\", \"v4rhosigma2lapl\", \"v4rhosigma2tau\",\n \"v4rhosigmalapl2\", \"v4rhosigmalapltau\", \"v4rhosigmatau2\",\n \"v4rholapl3\", \"v4rholapl2tau\", \"v4rholapltau2\", \"v4rhotau3\",\n \"v4sigma4\", \"v4sigma3lapl\", \"v4sigma3tau\", \"v4sigma2lapl2\",\n \"v4sigma2lapltau\", \"v4sigma2tau2\", \"v4sigmalapl3\", \"v4sigmalapl2tau\",\n \"v4sigmalapltau2\", \"v4sigmatau3\", \"v4lapl4\", \"v4lapl3tau\",\n \"v4lapl2tau2\", \"v4lapltau3\", \"v4tau4\"\n ]\n\n # Build input args\n output = _check_arrays(output, output_labels[0:1],\n self.xc_func_sizes, npoints, do_exc)\n output = _check_arrays(output, output_labels[1:5],\n self.xc_func_sizes, npoints, do_vxc)\n output = _check_arrays(output, output_labels[5:15],\n self.xc_func_sizes, npoints, do_fxc)\n output = _check_arrays(output, output_labels[15:35],\n self.xc_func_sizes, npoints, do_kxc)\n output = _check_arrays(output, output_labels[35:70],\n self.xc_func_sizes, npoints, do_lxc)\n\n args.extend([ inp[x] for x in input_labels])\n if not self._needs_laplacian:\n args.insert(-1, np.empty((1))) # Add none ptr to laplacian\n args.extend([output[x] for x in output_labels])\n\n core.xc_mgga(*args)\n\n else:\n raise KeyError(\"Functional kind not recognized! (%d)\" % self.get_kind())\n\n return {k: v for k, v in zip(output_labels, args[2+input_num_args:]) if not v is None}",
"def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')",
"def _compute_lc_exp(self,srcmdl = 'none', target = \"\", specin = -2.1, overwrite=False, **kwargs):\n\n\n\tloglevel = kwargs.get('loglevel', self.loglevel)\n\n\tfor i,c in enumerate(self.components):\n\n\t kw = dict(infile=c.files['lcmap'],\n\t\tscfile=c.data_files['scfile'],\n\t\tirfs = c.config['gtlike']['irfs'],\n\t\tsrcmdl = path.join(self.workdir,'{1:s}_{0:02n}.xml'.format(i,srcmdl)) \\\n\t\t\t if not target == \"\" else \"none\",\n\t\ttarget = target, \n\t\tspecin = specin,\n\t\temin = c.config['selection']['emin'],\n\t\temax = c.config['selection']['emax'],\n\t\tenumbins = self.enumbins\n\t\t)\n\n\t print kw['target'], kw['srcmdl']\n\t run_gtapp('gtexposure', self.logger, kw, loglevel=loglevel)\n\treturn",
"def efc_calcs(df_param_indexed):\n \n df_param_indexed = df_param_indexed.copy()\n \n ''' commented 20180210 after Calmetrix update\n # Remove for cc1 data exported with cc2\n mix_start = datetime.strptime(\n df_param_indexed.loc['Mix Time', 1], \"%d-%b-%Y %H:%M:%S\")\n log_start = datetime.strptime(\n df_param_indexed.loc['Start Time', 1], \"%d-%b-%Y %H:%M:%S\")\n time_difference = (log_start - mix_start).total_seconds()\n '''\n\n # Calculate mass of binder in sample\n m_slag = float(df_param_indexed.loc['Suppl 1 Mass, g', 1])\n m_fa = float(df_param_indexed.loc['Suppl 2 Mass, g', 1])\n m_water = float(df_param_indexed.loc['Water Mass, g', 1])\n m_agg = float(df_param_indexed.loc['Aggr Mass, g', 1])\n m_sample = float(df_param_indexed.loc['Sample Mass, g', 1])\n m_sample_scm = m_sample / (m_slag + m_fa + m_water + m_agg) * (m_slag + m_fa)\n \n return m_sample_scm",
"def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):",
"def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. 
Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)",
"def _call(self, X):\n\n dim=2\n \n Imv = self.operator.Shooting(X)\n I = Imv[0]\n m = Imv[1]\n v = Imv[2]\n N = I.shape[0] - 1\n \n mhat = m[0].space.zero()\n Ihat = self.operator.reg_param * self.operator.attach.gradient(I[-1])\n \n # Create the gradient op\n grad_op = Gradient(domain=self.operator.space, method='forward',pad_mode='constant', pad_const = 0)\n # Create the divergence op\n div_op = -grad_op.adjoint\n\n \n for i in range(N):\n gradmhat0 = grad_op(mhat[0])\n gradmhat1 = grad_op(mhat[1])\n gradmhat = [gradmhat0, gradmhat1]\n gradI = grad_op(I[N-i])\n coad0 = sum(grad_op(m[N-i][0])*mhat) + sum([gradmhat[j][0] * m[N-i][j] for j in range(dim)]) + div_op(mhat)*m[N-i][0]\n coad1 = sum(grad_op(m[N-i][1])*mhat) + sum([gradmhat[j][1] * m[N-i][j] for j in range(dim)]) + div_op(mhat)*m[N-i][1]\n coad_mhat_m = mhat.space.element([coad0, coad1])\n \n vhat = (2 * np.pi) ** (dim / 2.0) * self.vectorial_ft_fit_op.inverse(self.vectorial_ft_fit_op(-coad_mhat_m + Ihat * gradI) * self.ft_kernel_fitting)\n \n Ihat = Ihat + 1/N * div_op(Ihat * v[N-i])\n advm0 = sum(grad_op(v[N-i][0])*mhat) - sum(grad_op(mhat[0])*v[N-i])\n advm1 = sum(grad_op(v[N-i][1])*mhat) - sum(grad_op(mhat[1])*v[N-i]) \n advm = mhat.space.element([advm0, advm1])\n mhat = mhat - 1/N*(advm + vhat)\n \n Km = (2 * np.pi) ** (dim / 2.0) * self.vectorial_ft_fit_op.inverse(self.vectorial_ft_fit_op(m[0]) * self.ft_kernel_fitting)\n \n return Km + mhat",
"def forward(self, value, query, lens):\n relevant_scores = self.relevant_score(value, query, lens)\n e_relevant_scores = torch.exp(relevant_scores)\n weights = e_relevant_scores / e_relevant_scores.sum(-1, keepdim=True)\n attention = (weights.unsqueeze(-1) * value).sum(1)\n return attention",
"def run(layers):\n\n # Depth above which people are regarded affected [m]\n threshold = 0.1\n thresholds = [0.1, 0.2, 0.3, 0.5, 0.8, 1.0]\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers) # Flood inundation [m]\n\n # Get population and gender ratio\n population = gender_ratio = None\n for layer in get_exposure_layers(layers):\n keywords = layer.get_keywords()\n\n if 'datatype' not in keywords:\n population = layer\n else:\n datatype = keywords['datatype']\n\n if 'population' in datatype and 'density' in datatype:\n population = layer\n\n if 'female' in datatype and 'ratio' in datatype:\n gender_ratio_unit = keywords['unit']\n\n msg = ('Unit for gender ratio must be either '\n '\"percent\" or \"ratio\"')\n assert gender_ratio_unit in ['percent', 'ratio'], msg\n\n gender_ratio = layer\n\n msg = 'No population layer was found in: %s' % str(layers)\n assert population is not None, msg\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Calculate impact as population exposed to depths > threshold\n if population.get_resolution(native=True, isotropic=True) < 0.0005:\n # Keep this for backwards compatibility just a little while\n # This uses the original custom population set and\n # serves as a reference\n\n P = population.get_data(nan=0.0) # Population density\n pixel_area = 2500\n I = numpy.where(D > threshold, P, 0) / 100000.0 * pixel_area\n else:\n # This is the new generic way of scaling (issue #168 and #172)\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n if gender_ratio is not None:\n # Extract gender ratio at each pixel (as ratio)\n G = gender_ratio.get_data(nan=0.0)\n if gender_ratio_unit == 'percent':\n G /= 100\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n\n I_female = I * G\n I_male = I - I_female\n\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n # Create report\n caption = ('<table border=\"0\" width=\"320px\">'\n ' <tr><td><b>%s:</b></td>'\n '<td align=\"right\"><b>%s</b></td></tr>'\n % ('Jumlah Penduduk', total))\n if gender_ratio is not None:\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n\n\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Wanita', total_female))\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Pria', total_male))\n caption += '<tr><td> </td></tr>' # Blank separation row\n\n caption += (' <tr><td><b>%s:</b></td>'\n '<td align=\"right\"><b>%s</b></td></tr>'\n % ('Perkiraan Jumlah Terdampak (> %.1fm)' % threshold,\n count))\n\n if gender_ratio is not None:\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n\n\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Wanita', affected_female))\n caption += (' <tr><td>%s:</td>'\n '<td align=\"right\">%s</td></tr>'\n % (' - Pria', affected_male))\n\n caption += '</table>'\n\n caption += '<br>' # Blank separation row\n caption += 'Catatan: Semua nomor x 1000'\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'caption': caption})\n return R",
"def apply_science_modules_elasticc(df: DataFrame) -> DataFrame:\n # Required alert columns\n to_expand = ['midPointTai', 'filterName', 'psFlux', 'psFluxErr']\n\n # Use for creating temp name\n prefix = 'c'\n\n # Append temp columns with historical + current measurements\n for colname in to_expand:\n df = concat_col(\n df, colname, prefix=prefix,\n current='diaSource', history='prvDiaForcedSources'\n )\n expanded = [prefix + i for i in to_expand]\n\n _LOG.info(\"New processor: xmatch (random positions)\")\n # Assuming random positions\n df = df.withColumn('cdsxmatch', F.lit('Unknown'))\n\n _LOG.info(\"New processor: asteroids (random positions)\")\n df = df.withColumn('roid', F.lit(0))\n\n # add redshift\n df = df.withColumn('redshift', F.col('diaObject.z_final'))\n df = df.withColumn('redshift_err', F.col('diaObject.z_final_err'))\n\n _LOG.info(\"New processor: EarlySN\")\n args = ['cmidPointTai', 'cfilterName', 'cpsFlux', 'cpsFluxErr']\n\n # fake cdsxmatch and nobs\n args += [F.col('diaObject.ra'), F.col('diaObject.decl')]\n args += [F.col('diaObject.hostgal_ra'), F.col('diaObject.hostgal_dec')]\n args += [F.col('diaObject.hostgal_snsep')]\n args += [F.col('diaObject.hostgal_zphot')]\n args += [F.col('diaObject.hostgal_zphot_err')]\n\n df = df.withColumn('rf_snia_vs_nonia', rfscore_sigmoid_elasticc(*args))\n\n # Apply level one processor: superNNova\n _LOG.info(\"New processor: supernnova - Ia\")\n args = [F.col('diaSource.diaSourceId')]\n args += [F.col('cmidPointTai'), F.col('cfilterName'), F.col('cpsFlux'), F.col('cpsFluxErr')]\n args += [F.col('roid'), F.col('cdsxmatch'), F.array_min('cmidPointTai')]\n args += [F.col('diaObject.mwebv'), F.col('redshift'), F.col('redshift_err')]\n args += [F.lit('elasticc_ia')]\n df = df.withColumn('snn_snia_vs_nonia', snn_ia_elasticc(*args))\n\n _LOG.info(\"New processor: supernnova - Broad\")\n args = [F.col('diaSource.diaSourceId')]\n args += [F.col('cmidPointTai'), F.col('cfilterName'), F.col('cpsFlux'), F.col('cpsFluxErr')]\n args += [F.col('roid'), F.col('cdsxmatch'), F.array_min('cmidPointTai')]\n args += [F.col('diaObject.mwebv'), F.col('redshift'), F.col('redshift_err')]\n args += [F.lit('elasticc_broad')]\n df = df.withColumn('preds_snn', snn_broad_elasticc(*args))\n\n mapping_snn = {\n 0: 11,\n 1: 13,\n 2: 12,\n 3: 22,\n 4: 21,\n }\n mapping_snn_expr = F.create_map([F.lit(x) for x in chain(*mapping_snn.items())])\n\n col_class = F.col('preds_snn').getItem(0).astype('int')\n df = df.withColumn('snn_broad_class', mapping_snn_expr[col_class])\n df = df.withColumn('snn_broad_max_prob', F.col('preds_snn').getItem(1))\n\n # CBPF\n args = ['cmidPointTai', 'cpsFlux', 'cpsFluxErr', 'cfilterName']\n args += [F.col('diaObject.mwebv'), F.col('diaObject.z_final'), F.col('diaObject.z_final_err')]\n args += [F.col('diaObject.hostgal_zphot'), F.col('diaObject.hostgal_zphot_err')]\n df = df.withColumn('cbpf_preds', predict_nn(*args))\n\n mapping_cats_general = {\n 0: 11,\n 1: 12,\n 2: 13,\n 3: 21,\n 4: 22,\n }\n mapping_cats_general_expr = F.create_map([F.lit(x) for x in chain(*mapping_cats_general.items())])\n\n df = df.withColumn('argmax', F.expr('array_position(cbpf_preds, array_max(cbpf_preds)) - 1'))\n df = df.withColumn('cats_broad_class', mapping_cats_general_expr[df['argmax']])\n df = df.withColumn('cats_broad_max_prob', F.array_max(df['cbpf_preds']))\n\n # AGN\n args_forced = [\n 'diaObject.diaObjectId', 'cmidPointTai', 'cpsFlux', 'cpsFluxErr', 'cfilterName',\n 'diaSource.ra', 'diaSource.decl',\n 'diaObject.hostgal_zphot', 
'diaObject.hostgal_zphot_err',\n 'diaObject.hostgal_ra', 'diaObject.hostgal_dec'\n ]\n df = df.withColumn('rf_agn_vs_nonagn', agn_elasticc(*args_forced))\n\n # SLSN\n args_forced = [\n 'diaObject.diaObjectId', 'cmidPointTai', 'cpsFlux', 'cpsFluxErr', 'cfilterName',\n 'diaSource.ra', 'diaSource.decl',\n 'diaObject.hostgal_zphot', 'diaObject.hostgal_zphot_err',\n 'diaObject.hostgal_ra', 'diaObject.hostgal_dec'\n ]\n df = df.withColumn('rf_slsn_vs_nonslsn', slsn_elasticc(*args_forced))\n\n # Drop temp columns\n df = df.drop(*expanded)\n df = df.drop(*['preds_snn', 'cbpf_preds', 'redshift', 'redshift_err', 'cdsxmatch', 'roid', 'argmax'])\n\n return df",
"def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n # Stage 1: Execute all refactoring operations in the sequence x\n for refactoring_operation in x.refactoring_operations:\n refactoring_operation.do_refactoring()\n\n # Stage 2: Computing quality attributes\n # Todo: Add testability and modularity objectives\n # Todo: Normalize objective values in a standard range\n # Todo: Reduce QMOOD metrics to one objective by averaging them\n o1 = Objectives.reusability\n o2 = Objectives.understandability\n # o1 = 1/6 * sum qmood metrics\n # o2 = testability ## Our new objective\n # o3 = modularity ## Our new objective\n\n # Stage 3: Marshal objectives into vector\n out[\"F\"] = np.array([-1 * o1, -1 * o2], dtype=float)",
"def f_unc(xpts, offset, *params):\n res = 0\n for i, p in enumerate(coefficients):\n res += p*xpts**i\n return res",
"def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n # Stage 1: Execute all refactoring operations in the sequence x\n for refactoring_operation in x.refactoring_operations:\n refactoring_operation.do_refactoring()\n\n # Stage 2: Computing quality attributes\n # Todo: Add testability and modularity objectives\n # Todo: Normalize objective values in a standard range\n o1 = Objectives.reusability\n o2 = Objectives.understandability\n o3 = Objectives.flexibility\n o4 = Objectives.functionality\n o5 = Objectives.effectiveness\n o6 = Objectives.extendability\n # o7 = testability ## Our new objective\n # o8 = modularity ## Our new objective\n\n # Stage 3: Marshal objectives into vector\n out[\"F\"] = np.array([-1 * o1, -1 * o2, -1 * o3, -1 * o4, -1 * o5, -1 * o6, ], dtype=float)",
"def calc_xi(self):\n\t\n\tk_dot_x = self.k[0]*self.x[0,:,:] + self.k[1]*self.x[1,:,:] + self.k[2]*self.x[2,:,:]\n\n\tself.xi = self.t.reshape((1,self.N)) - k_dot_x/l.Clight\n\n\treturn",
"def evaluateAttack(self, gameState, action):\r\n features = self.getFeaturesAttack(gameState, action)\r\n weights = self.getWeightsAttack(gameState, action)\r\n return features * weights",
"def filter_and_correct_expression_and_image_features(tissue, model, aggregation, patch_size, M, k, pc_correction=False, tf_correction=False):\n\n\n\n\n # Filter expression\n Y, X, dIDs, tIDs, tfs, ths, t_idx = extract_final_layer_data(tissue, model, aggregation, patch_size)\n filt_X, filt_tIDs, final_exp_idx = filter_expression(X, tIDs, M, k)\n\n\n\n if pc_correction:\n print ('Correcting with {} expression PCs'.format(pc_correction))\n pca = PCA(n_components=pc_correction)\n\n\n pca_predictors = pca.fit_transform(filt_X)\n\n # Correct Y\n lr = LinearRegression()\n lr.fit(pca_predictors, Y)\n predicted_Y = lr.predict(pca_predictors)\n corrected_Y = Y - predicted_Y\n\n # Correct X\n projected_filt_X = np.dot(pca_predictors,pca.components_)\n corrected_filt_X = filt_X - projected_filt_X\n\n # Set as return variables\n final_X = corrected_filt_X\n final_Y = corrected_Y\n\n elif tf_correction:\n print('Correcting with all technical factors')\n tf_Y = Y[t_idx,:]\n tf_filt_X = filt_X[t_idx,:]\n\n tfs[list(ths).index('SMTSISCH')] = np.log2(tfs[list(ths).index('SMTSISCH')] + 1)\n tf_predictors = tfs\n\n #Correct Y\n lr_Y = LinearRegression()\n lr_Y.fit(tf_predictors, tf_Y)\n tf_Y_predicted = lr_Y.predict(tf_predictors)\n corrected_tf_Y = tf_Y - tf_Y_predicted\n\n #Correct X\n lr_X = LinearRegression()\n lr_X.fit(tf_predictors, tf_filt_X)\n tf_filt_X_predicted = lr_X.predict(tf_predictors)\n corrected_tf_filt_X = tf_filt_X - tf_filt_X_predicted\n\n # Set as return variables\n final_X = corrected_tf_filt_X\n final_Y = corrected_tf_Y\n else:\n # Set unmodified values as return variables\n final_X = filt_X\n final_Y = Y\n\n return final_Y, final_X, dIDs, filt_tIDs, tfs, ths, t_idx",
"def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size",
"def estimate_impact(self, datasource_name, target_column, event_name, start_date, end_date,\n result_interval=TimeInterval.day):\n response, _, headers = self._create_session(datasource_name, 'impact', target_column, event_name, start_date,\n end_date, result_interval, is_estimate=True)\n return SessionResponse(response, headers)",
"def deposited_exposure_between_bounds(self, time1: float, time2: float) -> _VectorisedFloat:\n deposited_exposure: _VectorisedFloat = 0.\n for interaction in self.short_range:\n start, stop = interaction.extract_between_bounds(time1, time2)\n short_range_jet_exposure = interaction._normed_jet_exposure_between_bounds(\n self.concentration_model, start, stop)\n short_range_lr_exposure = interaction._normed_interpolated_longrange_exposure_between_bounds(\n self.concentration_model, start, stop)\n dilution = interaction.dilution_factor()\n\n fdep = interaction.expiration.particle.fraction_deposited(evaporation_factor=1.0)\n diameter = interaction.expiration.particle.diameter\n \n # Aerosols not considered given the formula for the initial\n # concentration at mouth/nose.\n if diameter is not None and not np.isscalar(diameter):\n # We compute first the mean of all diameter-dependent quantities\n # to perform properly the Monte-Carlo integration over\n # particle diameters (doing things in another order would\n # lead to wrong results for the probability of infection).\n this_deposited_exposure = (np.array(short_range_jet_exposure\n * fdep).mean()\n - np.array(short_range_lr_exposure * fdep).mean()\n * self.concentration_model.infected.activity.exhalation_rate)\n else:\n # In the case of a single diameter or no diameter defined,\n # one should not take any mean at this stage.\n this_deposited_exposure = (short_range_jet_exposure * fdep\n - short_range_lr_exposure * fdep\n * self.concentration_model.infected.activity.exhalation_rate)\n\n # Multiply by the (diameter-independent) inhalation rate\n deposited_exposure += (this_deposited_exposure *\n interaction.activity.inhalation_rate\n /dilution)\n\n # Then we multiply by diameter-independent quantities: viral load\n # and fraction of infected virions\n f_inf = self.concentration_model.infected.fraction_of_infectious_virus()\n deposited_exposure *= (f_inf\n * self.concentration_model.virus.viral_load_in_sputum\n * (1 - self.exposed.mask.inhale_efficiency()))\n # Long-range concentration\n deposited_exposure += self.long_range_deposited_exposure_between_bounds(time1, time2)\n\n return deposited_exposure",
"def compute(self,input):\n\n for layer in self.layers:\n input = layer.compute(input)\n return input"
] |
[
"0.6271787",
"0.5877842",
"0.5789374",
"0.5760987",
"0.561309",
"0.550563",
"0.5501243",
"0.5386281",
"0.5335529",
"0.52324504",
"0.51998514",
"0.51906973",
"0.51878357",
"0.5186649",
"0.5170273",
"0.51142454",
"0.5087062",
"0.5052597",
"0.50490326",
"0.49957764",
"0.49872687",
"0.4972741",
"0.4969028",
"0.49646512",
"0.4956807",
"0.49497342",
"0.4948568",
"0.49399793",
"0.49368376",
"0.4926248"
] |
0.65110976
|
0
|
Checks an object's class and names it. Since we've got multiple nets predicting object classes like 0, 1, 2, we want to make sure it doesn't get confusing when saving data and drawing bounding boxes (BBs).
|
def determine_object_class(self, components_detected):
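    # Map each detector's numeric class id to a human-readable name so that saved
    # results and drawn bounding boxes stay unambiguous across the different nets.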
    for subimage, components in components_detected.items():
        for component in components:
            if component.class_id == 0:
                component.object_name = "insl"  # Insulator
            elif component.class_id == 1:
                component.object_name = "dump"  # Vibration dumper
            else:
                component.object_name = "pillar"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_class(self, obj):\n\n object_type = obj.object_type\n\n 'Background class'\n object_class = 0\n\n # Don't care classes\n if object_type in ['DontCare', 'Person_sitting'] or obj.truncation > 0.75 or obj.occlusion > 1:\n object_class = 1\n\n # Vehicle classes\n elif object_type in ['Car', 'Van']:\n object_class = 2\n\n # Pedestrian class\n elif object_type in ['Pedestrian']: # TODO: Consider change this with ==\n object_class = 3\n\n # Cyclist class\n elif object_type in ['Cyclist']: # TODO: Consider change this with ==\n object_class = 4\n\n return object_class",
"def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob",
"def test_class_names_must_match(self):\n oz = ClassBalance(labels=[\"a\", \"b\", \"c\"])\n dataset = make_fixture(binary=False, split=False)\n\n with pytest.raises(YellowbrickValueError):\n oz.fit(dataset.y)",
"def has_classname(self):\n return self.unpack_word(0x4A) > 0",
"def is_boost_class(obj: Any) -> bool:\n return \"Boost.Python.class\" in str(type(obj))",
"def test_class_name(self):\n r = Review()\n r_dictionary = r.to_dict()\n self.assertIn('__class__', r_dictionary)",
"def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))",
"def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):",
"def skip_sample_for_balanced_class(self, img_data):\n class_in_img = False\n for bbox in img_data['bboxes']:\n cls_name = bbox['class']\n if cls_name == self.curr_class:\n class_in_img = True\n ## 更新一次,获取下一次的值\n self.curr_class = next(self.class_cycle)\n break \n if class_in_img:\n return False\n else:\n return True",
"def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)",
"def _validateClassification(self, trainingSet):\n wrongCount = 0.\n\n pv = []\n tv = []\n\n if self.K == 1:\n for example in trainingSet:\n Y = self.test(example)\n \n givenClass = example.label[0]\n if Y[0] < 0.5:\n chosenClass = 0\n else:\n chosenClass = 1\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \n if chosenClass != givenClass:\n wrongCount += 1.\n else:\n for example in trainingSet:\n Y = self.test(example)\n \n posterior, chosenClass = max((x, i) for i, x in enumerate(Y))\n max_val, givenClass = max((x, i) for i, x in enumerate(example.label))\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \t\t\t\n if chosenClass != givenClass:\n wrongCount += 1.\n \n return wrongCount/len(trainingSet), pv, tv",
"def classifier(self):\n\n print \"Starting Classification\"\n self.detections.rotationClass = [ self.detections.rotationTimeTags[index] for index, theta in enumerate(self.detections.rotations) if theta > 30]\n if len(self.detections.rotationClass) < 1:\n print \"Too little rotation hits\"\n self.detections.classification = \"Too little rotation hits\"\n\n else:\n \n for attribute, value in classIterator(self.detections):\n print value[1]\n if 'crease' in attribute:\n \n if value[1] > self.detections.rotationClass[0] and value[1] < self.detections.rotationClass[-1]:\n print \"direct hit\", attribute, value[1]\n self.detections.classification = \"Direct hit\"\n #if self.detections.\n else:\n for angleStamp in self.detections.rotationClass:\n if secondsCount(value[1],angleStamp).total_seconds < 10:\n self.detections.classification = \"Near miss\"\n \n else:\n self.detections.classification = \"Nothing impressive\"\n print \"Ending Classification\"",
"def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r",
"def identify_class(self, cls):",
"def isClass(self, className):\n return self.characterClass == className or self.baseClass == className",
"def classname(class_object):\n return class_object.__class__.__name__",
"def get_num_classes(self):",
"def test_labels_encoder_no_classes(self):\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())\n with pytest.warns(YellowbrickWarning, match=\"could not determine class labels\"):\n assert oz._labels() is None",
"def test_labels(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = LabelEncoder().fit(y)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)",
"def isclass(object):\r\n return isinstance(object, (type, types.ClassType))",
"def get_classification(self, image):\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n # Perform network inference\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n if self.RUNNING_ON_CARLA == True:\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n if classes[i] == 10:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n # Extract image from best bounding box and pass through light classifier\n ymin, xmin, ymax, xmax = boxes[i]\n im_height, im_width, im_depth = image.shape\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n tf_image_cropped = image[int(top):int(bottom), int(left):int(right), :]\n\n PILImage = Image.fromarray(tf_image_cropped)\n resized_img = PILImage.resize((85, 256), Image.ANTIALIAS)\n image_np_resized = self.load_image_into_numpy_array(resized_img)\n x = np.expand_dims(image_np_resized, axis=0)\n x = np.vstack([x])\n\n #model = load_model('tf_classifier_1.h5')\n #model.compile(loss='categorical_crossentropy',\n # optimizer='adam',\n # metrics=['accuracy'])\n classes = self.keras_model.predict_classes(x, batch_size=1)\n print(classes)\n\n if classes[0] == 0:\n self.current_light = TrafficLight.GREEN\n elif classes[0] == 2:\n self.current_light = TrafficLight.YELLOW\n else:\n self.current_light = TrafficLight.RED\n\n break\n\n else:\n # Check the detections. If it has a good score\n # then set the current light to the detected label. The\n # first one is always the best (they are returned sorted \n # in score order).\n # Note that we have trained for 14 categories, including\n # left/right arrows etc. Here we are only looking for \n # standard red, yellow and green light and ignore others.\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n if classname == 'Green':\n self.current_light = TrafficLight.GREEN\n elif classname == 'Yellow':\n self.current_light = TrafficLight.YELLOW\n elif classname == 'Red':\n self.current_light = TrafficLight.RED\n else:\n self.current_light = TrafficLight.UNKNOWN\n\n break\n\n return self.current_light",
"def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)",
"def check_classes(class_name: str) -> str:\n classes_list = []\n class_directory = base_directory\n # Print out all classes in teh class_directory\n for i in os.listdir(class_directory):\n if i.startswith(\".\"):\n pass\n else:\n # Append name off classes to the list\n classes_list.append(i)\n # Check to see if the name of the class is in the list\n if class_name in classes_list:\n current_directory = os.path.join(base_directory, class_name, \"/\")\n return current_directory\n else:\n cprint(f\"{class_name} is not a class, creating new folder\", \"red\")\n new_directory = os.path.join(class_directory, class_name)\n os.mkdir(new_directory)\n cprint(f\"path {new_directory} created\", \"red\")\n return new_directory",
"def object_detect(filename):\n cv2.ocl.setUseOpenCL(False)\n just_fname = filename.split(\".\")[0]\n image = cv2.imread('./static/uploads/' + filename)\n bbox, label, conf = cv.detect_common_objects(image)\n output_image = draw_bbox(image, bbox, label, conf)\n plt.imshow(output_image)\n plt.savefig(os.path.join('./static/output/', just_fname + '.png'))\n d = Counter(label)\n if not label:\n return \"No objects detected\"\n labelstr = \", \".join('{} {}'.format(v, k) for k, v in d.items())\n return labelstr",
"def object_detection(self):\r\n pass",
"def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)",
"def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')",
"def class_uc(x):\r\n if Class(x) == \"G\" :\r\n return 1\r\n else :\r\n if Class(x) == \"I\" :\r\n return 2\r\n else :\r\n return 0",
"def classes(self):\n #print \"making classes again!\"\n l = []\n for p in self.marks:\n l.append(psi_class(self,p))\n for d in range(1, self.dimension + 1):\n l.append(kappa_class(self,d))\n for i in range(1, self.genus+1):\n l.append(chern_char(self, 2*i-1))\n if True:#self.genus != 0:\n l.append(irreducible_boundary(self))\n marks = set(self.marks)\n reducible_boundaries = []\n if self.n != 0:\n first_mark_list = [marks.pop()] \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n reducible_boundaries.append( reducible_boundary(self, Mgn(g1, r_marks)) )\n \n reducible_boundaries.sort(key = lambda b: sorted(list(b.component1.marks)))\n reducible_boundaries.sort(key = lambda b: len(b.component1.marks))\n reducible_boundaries.sort(key = lambda b: b.component1.genus)\n \n else: #self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n reducible_boundaries.append(reducible_boundary(self, Mgn(g1, []))) \n \n \n l += reducible_boundaries \n \n for i in range(1,self.genus+1):\n l.append(lambda_class(self,i))\n return l",
"def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. (and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False"
] |
[
"0.6626742",
"0.63194853",
"0.6168209",
"0.5960394",
"0.59308374",
"0.59003305",
"0.57764536",
"0.57340455",
"0.5691106",
"0.56500655",
"0.5616511",
"0.5613041",
"0.5608129",
"0.560139",
"0.55893856",
"0.55811214",
"0.556558",
"0.5529344",
"0.5505262",
"0.547894",
"0.5475821",
"0.5466577",
"0.546575",
"0.54563653",
"0.5456032",
"0.5448329",
"0.5446003",
"0.5445391",
"0.54336184",
"0.5431017"
] |
0.6471579
|
1
|
Syncroom-aware users list. Optional parameter conversation_id gets a list of users in other rooms; the list will include users in linked syncrooms. Append "rooms" to segment the user list by individual rooms.
|
def syncusers(bot, event, *args):
if not bot.get_config_option('syncing_enabled'):
return
combined = True
tokens = list(args)
if "rooms" in args:
tokens.remove("rooms")
combined = False
if "rooms" in args:
tokens.remove("room")
combined = False
    if len(tokens) == 0:
filter_convs = [ event.conv_id ]
else:
filter_convs = tokens
target_conv = filter_convs.pop(0)
user_lists = _syncout_users(bot, target_conv)
if not user_lists:
yield from bot.coro_send_message(event.conv_id, "no users were returned")
return
_lines = []
for room_id in user_lists:
if combined and room_id != "*":
# list everything, only use wildcard
continue
elif not combined and room_id == "*":
# list room-by-room, skip wildcard
continue
        if filter_convs and room_id not in filter_convs and room_id != target_conv:
# if >1 conv id provided, filter by only supplied conv ids
continue
if room_id == "*":
_lines.append("**all syncout rooms**")
else:
_lines.append("**{} ({})**".format( bot.conversations.get_name(room_id),
room_id ))
user_list = user_lists[room_id]
for chat_id in user_list:
_lines.append("* {}".format(user_list[chat_id].full_name))
yield from bot.coro_send_message(event.conv_id, "\n".join(_lines))
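    # Usage sketch (assumption: this is a hangoutsbot-style command plugin;
    # the exact invocation prefix depends on the bot's configuration):
    #   /bot syncusers                -> combined user list for the current conversation
    #   /bot syncusers rooms          -> per-room breakdown, including linked syncrooms
    #   /bot syncusers <conv_id> ...  -> restrict the listing to the given conversations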
"""
# are we in a sync room?
sync_room_list = None
for _rooms in syncouts:
if conversation_id in _rooms:
sync_room_list = _rooms
_lines.append(_("<b>Sync Rooms: {}</b>").format(len(sync_room_list)))
break
if sync_room_list is None:
sync_room_list = [conversation_id]
_lines.append(_("<b>Standard Room</b>"))
all_users = {}
try:
if combined or len(sync_room_list) == 1:
all_users["_ALL_"] = bot.get_users_in_conversation(sync_room_list)
else:
for room_id in sync_room_list:
all_users[room_id] = bot.get_users_in_conversation(room_id)
except KeyError as e:
# most likely raised if user provides invalid room list
yield from bot.coro_send_message(event.conv, _('<b>failed to retrieve user list</b>'))
return
unique_users = []
for room_id in all_users:
if room_id is not "_ALL_":
_line_room = '<i>{}</i>'.format(room_id)
_line_room = '<b>{}</b> {}'.format(
bot.conversations.get_name(room_id),
_line_room)
_lines.append(_line_room)
list_users = all_users[room_id]
for User in list_users:
            _line_user = '{}'.format(User.full_name)
if User.emails:
_line_user = _line_user + ' ({})'.format(User.emails[0])
_lines.append(_line_user)
unique_users.append(User)
unique_users = list(set(unique_users))
_lines.append(_("<b>Total Unique: {}</b>").format(len(unique_users)))
yield from bot.coro_send_message(event.conv, '<br />'.join(_lines))
"""
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def get_app_service_users_in_room(\n self,\n room_id: str,\n app_service: \"ApplicationService\",\n cache_context: _CacheContext,\n ) -> Sequence[str]:\n # We can use `get_local_users_in_room(...)` here because an application service\n # can only be interested in local users of the server it's on (ignore any remote\n # users that might match the user namespace regex).\n local_users_in_room = await self.get_local_users_in_room(\n room_id, on_invalidate=cache_context.invalidate\n )\n return list(filter(app_service.is_interested_in_user, local_users_in_room))",
"def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list",
"def get_users_list(self, session):\n\n users = session.query(User.chat_id).filter(User.is_admin==False).all()\n return users",
"def _list(room_name):\n members = redis.smembers(room_name)\n \n if str(members) == 'set()':\n text = '```Users in list: none```'\n return text\n\n text = 'Users in list: %s ' % ','.join(members)\n \n return text",
"def online_users(room):\n threshold = datetime.now() - timedelta(seconds=10)\n authorizations = models.Authorization.gql(\"WHERE room = :room AND last_checked_in >= :threshold\", room=room, threshold=threshold).fetch(1000)\n return [x.user for x in authorizations]",
"def handle_list_room(self, lobby_command, client_socket):\n print(\"Handling list command...\")\n msg = ''\n words = lobby_command.split()\n # List all rooms\n if len(words) == 1:\n msg = 'Available Rooms:\\n'\n for room in self.rooms:\n msg += f'\\t\\t{room.name}\\n'\n \n self.just_send(client_socket, msg)\n return\n else:\n # List all rooms and members\n roomname = words[1]\n if roomname == \"all\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'All rooms and users:\\n'\n for room in self.rooms:\n msg += f'Room: {room.name}\\nUsers: '\n for user in room.room_attrbts['members']:\n msg += f'\\t{user}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n\n # List user's room membership\n if roomname == \"mine\":\n user = self.clients[client_socket]['data'].decode('utf-8')\n msg = f'Rooms user {user} has joined:\\n'\n for room in self.rooms:\n if user in room.room_attrbts['members']:\n msg += f'\\t\\t{room.name}'\n if user in room.room_attrbts['admins']:\n msg += ' - Admin'\n msg += '\\n'\n self.just_send(client_socket, msg)\n return\n \n # List membership and active users of a room\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Request roomname found..\")\n msg = f'User members of room {roomname}:\\n'\n for member in _room.room_attrbts['members']:\n msg += f'\\t\\t{member}\\n'\n msg+= '\\n'\n self.just_send(client_socket, msg)\n \n msg = 'Users active in room:\\n'\n for active_user in _room.room_attrbts['active']:\n msg += f'\\t\\t{active_user}\\n'\n self.just_send(client_socket, msg)\n return\n if msg == '':\n msg = f'Client passed an invalid room to list members of {roomname}\\n'\n self.log_and_send(client_socket, msg)\n return",
"def get_users_list_full(self, session):\n\n users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users",
"def get_members(self, *, room: Room) -> List[User]:\n return room.members",
"def get_rooms(user_id, org_id):\n\n helper = DataStorage()\n helper.organization_id = org_id\n query = {\"room_user_ids\":user_id}\n options = {\"sort\":{\"created_at\":-1}}\n response = helper.read_query(\"dm_rooms\", query=query, options=options)\n\n if response and \"status_code\" not in response:\n return response\n return []",
"def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)",
"def _users_list(self):\n result = self.slack.api_call(\"users.list\", presence=0)\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['members']",
"def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users",
"def get_room_members():\n incoming = request.get_json()\n res = dispatch(Chatroom.get_room_members_with_room_id(incoming['room_id']))\n members = [{'user_id': row[0], 'username': row[1]} for row in res]\n return jsonify(results = members)",
"def list(self, room=None, user=None):\n if room is not None:\n room = str(room)\n if user is not None:\n user = str(user)\n with self._lock:\n # minimise locking time by creating a copy to iterate over\n notifications = deepcopy(self.notifications)\n\n for room_id, regexes in notifications.items():\n if not (room is None or room == room_id):\n continue\n for regex, users in regexes.items():\n if user is None:\n for user_id in users:\n yield room_id, regex, user_id, self.users[user_id]\n elif user in users:\n yield room_id, regex, user, self.users[user]",
"def list_users(bookings):\n return[view_user(booking.user) for booking in bookings]",
"def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())",
"def get_all_users():",
"def list_users(self):\n raise NotImplementedError",
"def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:",
"def _get_users_list(self):\n return self.users['user_id'].tolist()",
"def user_list(request_dict):\n users = User.query.all()\n users_list = list()\n for user in users:\n users_list.append(user)\n\n return JSONTools.user_list_reply(users_list)",
"def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users",
"def list_users(self, stream_name:str, version:int=1)->List[str]:\n stream_path = self._get_storage_path(stream_name=stream_name, version=version)\n all_users = self._ls_dir(stream_name=stream_name, version=version)\n user_ids = []\n for usr in all_users:\n user_ids.append(usr.replace(stream_path,\"\").replace(\"user=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return user_ids",
"def get_messages():\n incoming = request.get_json()\n messages = Message.get_messages_from_room_id(incoming['room_id'])\n messages = [{'user_id': message.user_id, \n 'sendTime': message.sendTime, 'content': message.content} for message in messages]\n for message in messages:\n user = User.get_user_with_user_id(message['user_id'])\n message['username'] = str(user.username)\n return jsonify(results = messages)",
"def messages_list(from_username, to_username):\n User.query.get_or_404(from_username)\n User.query.get_or_404(to_username)\n\n messages = Message.find_all(from_username, to_username)\n serialized = [message.serialize() for message in messages]\n return (jsonify(messages=serialized), 200)",
"def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))",
"def get_chartooms():\n result = dispatch(Chatroom.get_chatroom_with_user_id(session['user_id']))\n rooms = [{'room_id': row[0], 'name': row[1]} for row in result]\n for room in rooms:\n res = dispatch(Chatroom.get_room_members_with_room_id(room['room_id']))\n res = [ row[1] for row in res]\n room['members'] = res\n return jsonify(results = rooms)",
"def refresh_userlist(self):\n if self._userlist is not None:\n self._userlist.delete(0, Tix.END)\n if self._channel in self._frame.client.channels:\n ops = [ ]\n voices = [ ]\n users = [ ]\n l = self._frame.client.channels[self._channel].nicknames\n for name in l:\n ni = l[name]\n mode = l[name].mode\n if 'o' in mode:\n ops.append(\"@\" + ni.nickname)\n elif 'v' in mode:\n voices.append(\"+\" + ni.nickname)\n else:\n users.append(ni.nickname)\n l = (sorted(ops, key=unicode.lower)\n + sorted(voices, key=unicode.lower)\n + sorted(users, key=unicode.lower))\n for name in l:\n self._userlist.insert(Tix.END, name)\n self._userlistlabel.configure(\n text=\"%d Users, %d OPs\" % (len(l), len(ops)))",
"def get_conversation_list(request):\n collected_values = {}\n\n # Only accept GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract params\n uid = request.GET['uid']\n token = request.GET['token']\n limit = int(request.GET['limit']) # Force a limiter to see how many users to get\n\n # Check if the token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Maybe cache or find better way of getting most recent id's messaged\n # Do a walkthrough of all messages and count totals\n # Potential Improvement is to keep a mapping of all messages sent from users to users\n users = {}\n msg_sent = Messages.objects.filter(user_id=uid).order_by('-created_at')[:limit]\n msg_recieved = Messages.objects.filter(other_id=uid).order_by('-created_at')[:limit]\n for msg in msg_sent:\n if users.get(msg.other_id) is None:\n users[msg.other_id] = 1\n else:\n users[msg.other_id] += 1\n for msg in msg_recieved:\n if users.get(msg.user_id) is None:\n users[msg.user_id] = 1\n else:\n users[msg.user_id] += 1\n\n # Collect return values\n collected_values[\"users\"] = users\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation List Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)",
"def do_user_list(cs, args):\n _, users = cs.users.list()\n fields = ['user_id', 'username', 'email', 'realname', 'comment']\n utils.print_list(users, fields, sortby=args.sortby)"
] |
[
"0.6436777",
"0.62776923",
"0.62760395",
"0.61903304",
"0.6125914",
"0.60984933",
"0.5976745",
"0.59659094",
"0.5931594",
"0.5930427",
"0.5922079",
"0.5916611",
"0.5900496",
"0.5876881",
"0.5788277",
"0.5786364",
"0.57371485",
"0.5726489",
"0.57155997",
"0.5680143",
"0.56464803",
"0.5606815",
"0.5598927",
"0.55539393",
"0.5544944",
"0.55335814",
"0.55048215",
"0.55016494",
"0.55001366",
"0.54993874"
] |
0.75178987
|
0
|
Rename OpsiDepotserver with id `oldId` to `newId`. References to the old id will be changed as well.
|
def host_renameOpsiDepotserver(self, oldId, newId):
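    # The method body is not included in this row. A hypothetical sketch only --
    # the backend calls below are assumptions, not OPSI's confirmed API:
    #
    #   depot = self.host_getObjects(id=oldId)[0]
    #   depot.setId(newId)
    #   self.host_createObjects([depot])
    #   self.host_deleteObjects([{"type": "OpsiDepotserver", "id": oldId}])
    #   # ...then rewrite every reference to oldId (e.g. productOnDepot and
    #   # configState entries) so that they point at newId.
    pass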
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rename_var(self, old_id, new_id): # to be overriden in subclasses when necessary\n pass",
"def rename(old, new):",
"def rename(old, new):",
"def rename(oldname, newname):",
"def rename_preset(self, preset_id, new_id, REQUEST=None):\r\n\r\n raise NotImplementedError",
"def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))",
"def rename(cls, client, resource, new_name) :\n\t\ttry :\n\t\t\trenameresource = rewriteaction()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.name = resource.name\n\t\t\telse :\n\t\t\t\trenameresource.name = resource\n\t\t\treturn renameresource.rename_resource(client,new_name)\n\t\texcept Exception as e :\n\t\t\traise e",
"def rename(self, old_remote_path, new_remote_path, storage_id=None):\n client_old, old_remote_path = self._get_storage(old_remote_path, storage_id=storage_id)\n client_new, new_remote_path = self._get_storage(new_remote_path, storage_id=storage_id)\n\n if client_old._storage_id != client_new._storage_id:\n raise ValueError('rename on different storages')\n\n result = client_old.rename(old_remote_path, new_remote_path)\n if result is None: # some storages return nothing when ok and raise exception when error\n return True\n\n return result",
"def updateDefinitionForChangeId(self, oldId, newId):\n rx = re.compile(r\"((?<!\\w)(?<!\\d)(?<!\\.)\" + re.escape(oldId) +\n \"(?!\\d)(?!\\w))(?=(?:[^\\\"]|[\\\"][^\\\"]*[\\\"])*$)\")\n newDef = re.sub(rx, newId, self.definition)\n self._definition = newDef",
"def softwareInstanceRename(self, new_name, computer_id,\n computer_partition_id, slave_reference=None):\n return self._softwareInstanceRename(new_name, computer_id,\n computer_partition_id,\n slave_reference)",
"def rename(self, new_name):\n method = \"rename_cluster\"\n params = {\n \"cluster_id\": self.id,\n 'name': new_name\n }\n return self._client.connection.make_request(method, params)",
"def rename(self, oldname, newname):\n if not isinstance(oldname, str) or not isinstance(newname, str):\n raise TypeError(\"old and new variable names should be str\")\n # unabbreviate oldname\n oldname = self._find_vars(oldname, empty_ok=False)[0] \n if oldname == newname:\n return\n newname = newname.strip()\n \n if not self._is_valid_varname(newname):\n raise ValueError(newname + \" is not a valid Stata name\")\n if newname in self._varlist:\n raise ValueError(newname + \" already exists\")\n \n index = self._varlist.index(oldname)\n self._varlist[index] = newname\n \n # if oldname in chrdict, change to newname\n chrdict = self._chrdict\n if oldname in chrdict:\n chrdict[newname] = chrdict[oldname]\n del chrdict[oldname]\n \n self._changed = True",
"def renameSIdRefs(self, *args):\n return _libsbml.Port_renameSIdRefs(self, *args)",
"def replace_protocol(self, old_id, new_id):\n self._full_path = self._full_path.replace(old_id, new_id)",
"def replace_id(self, old_id, new_id):\n assert isinstance(old_id, core.Variable)\n assert isinstance(new_id, core.Variable)\n assert old_id in self.table and new_id not in self.table\n\n table = list(self.table.items())\n\n for i, (key, op) in enumerate(table):\n if key == old_id:\n new_key = new_id\n else:\n new_key = key\n\n table[i] = (new_key, op.xreplace({old_id: new_id}))\n\n self.table = bidict.OrderedBidict(table)",
"def renameSIdRefs(self, *args):\n return _libsbml.GeneProduct_renameSIdRefs(self, *args)",
"def rename(self, new_name):\n method = \"rename_vault\"\n params = {\n \"vault_id\": self.id,\n 'vault_name': new_name\n }\n return self._client.connection.make_request(method, params)",
"def userRenamed(self, old, new):\n sessions = self.findSessions(old)\n for ss in sessions:\n old = old.decode(ss.encoding)\n new = new.decode(ss.encoding)\n self.sendResponse(ss.rename(old, new))",
"def tag_rename(self, item_id, old_tag, new_tag, **params):\n\n self.queue('tag_rename', item_id=item_id,\n old_tag=old_tag, new_tag=new_tag, **params)",
"def renameSIdRefs(self, *args):\n return _libsbml.SpeciesFeature_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.Species_renameSIdRefs(self, *args)",
"def rename_column(self, table_name, old, new):\r\n # intentionally not quoting names\r\n self.callproc('sp_rename', (table_name + '.' + old, new, 'COLUMN'))",
"def rename(self, oldname, newname):\n self._check_rename(oldname, newname)\n conns = self.find_referring_connections(oldname)\n wflows = self.find_in_workflows(oldname)\n old_autos = self._cleanup_autopassthroughs(oldname)\n\n obj = self.remove(oldname)\n self.add(newname, obj)\n\n # oldname has now been removed from workflows, but newname may be in the wrong\n # location, so force it to be at the same index as before removal\n for wflow, idx in wflows:\n wflow.remove(newname)\n wflow.add(newname, idx)\n\n old_rgx = re.compile(r'(\\W?)%s.' % oldname)\n par_rgx = re.compile(r'(\\W?)parent.')\n\n # recreate all of the broken connections after translating oldname to newname\n for u, v in conns:\n self.connect(re.sub(old_rgx, r'\\g<1>%s.' % newname, u),\n re.sub(old_rgx, r'\\g<1>%s.' % newname, v))\n\n # recreate autopassthroughs\n if self.parent:\n for u, v in old_autos:\n u = re.sub(old_rgx, r'\\g<1>%s.' % '.'.join([self.name, newname]), u)\n v = re.sub(old_rgx, r'\\g<1>%s.' % '.'.join([self.name, newname]), v)\n u = re.sub(par_rgx, r'\\g<1>', u)\n v = re.sub(par_rgx, r'\\g<1>', v)\n self.parent.connect(u, v)",
"def rename_value(model: onnx.ModelProto, old_name: str, new_name: str):\n if old_name == new_name:\n return\n logger = get_root_logger()\n logger.info(f'rename {old_name} -> {new_name}')\n for n in model.graph.node:\n for i, output in enumerate(n.output):\n if output == old_name:\n n.output[i] = new_name\n for i, input in enumerate(n.input):\n if input == old_name:\n n.input[i] = new_name\n for v in model.graph.value_info:\n if v.name == old_name:\n v.name = new_name\n for i, input in enumerate(model.graph.input):\n if input.name == old_name:\n input.name = new_name\n for i, output in enumerate(model.graph.output):\n if output.name == old_name:\n output.name = new_name",
"def changeName(self, userId, newName):\n\t\turi = \"{}/users/{}\".format(tt_base_uri, userId)\n\t\turi_args = {\"name\":newName}\n\t\tr = requests.put(uri, json=uri_args, cookies={\"PLAY_SESSION\":self.play_session, \"__uvt\":\"\"})\n\t\tprint(\"change name: status code:\", r.status_code)",
"def vm_rename(vm_hostname, new_hostname, offline=False):\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] not in ['aws.dct', 'kvm.dct']:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type']\n )\n )\n\n if vm.dataset_obj['puppet_disabled']:\n raise ConfigError(\n 'Rename command only works with Puppet enabled'\n )\n\n if vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not offline:\n raise NotImplementedError(\n 'Rename command only works with --offline at the moment.'\n )\n if not vm.is_running():\n raise NotImplementedError(\n 'Rename command only works online at the moment.'\n )\n\n vm.rename(new_hostname)\n elif vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_rename(new_hostname)",
"def rename(self, src, dst):\n os.rename(src, dst)",
"def renameSIdRefs(self, *args):\n return _libsbml.InSpeciesTypeBond_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SBase_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.FluxObjective_renameSIdRefs(self, *args)"
] |
[
"0.66649985",
"0.65495193",
"0.65495193",
"0.63409895",
"0.5982003",
"0.5981632",
"0.5942411",
"0.59153366",
"0.58583134",
"0.5855038",
"0.58548456",
"0.5828306",
"0.58214545",
"0.5795251",
"0.57640916",
"0.57169753",
"0.56786716",
"0.56594557",
"0.56323206",
"0.5627446",
"0.56167436",
"0.56154245",
"0.5601386",
"0.55989975",
"0.5592013",
"0.5568282",
"0.5561627",
"0.5536562",
"0.552796",
"0.55169713"
] |
0.8711123
|
0
|
Subtract the center joint 1 (spine joint in the NTU dataset).
|
def sub_center_joint(data: np.ndarray, silient=False) -> np.array:
N, M, T, V, C = data.shape
# new_data = np.zeros((N, M, T, V+1, C))
# new_data[:, :, :, :V, :] = data
new_data = data.copy()
#sub center joint
for i_s, sample in enumerate(tqdm(new_data, disable=silient)):
if sample.sum() == 0:
continue
#T,1, C
        # TODO: add an optional variant that normalizes the way the original author does, i.e. at joint 2
main_body_center = sample[0][:, 0:1, :].copy()
for i_b, body in enumerate(sample):
if body.sum() == 0:
continue
            # mask to keep trailing all-zero (null) frames of the video zeroed out; shape T,1,1
mask = (body.sum((-1,-2)) != 0).reshape(T, 1, 1)
#position of center joint in the first frame. 1, 1, C
ts_start_of_center_joint = body[0:1, 0:1, :]
#positions of center joint in every frames. T,1,C
ts_position_of_center_joint_every_frames = body[:, 0:1, :]
#movement of center joint comparated to the start position. T,1,C
ts_movement_of_center_joint = ts_position_of_center_joint_every_frames - ts_start_of_center_joint
#T,V,C
new_data[i_s, i_b, :, 0:V, :] = (data[i_s, i_b] - main_body_center) * mask
#T,1,C
if V==26:
new_data[i_s, i_b, :, V-1:,:] = ts_movement_of_center_joint * mask
return new_data
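# Usage sketch (hypothetical NTU-style shapes: N samples, M bodies, T frames,
# V joints, C=3 coordinates; 'silient' is the snippet's own spelling):
#   data = np.random.rand(4, 2, 300, 25, 3).astype(np.float32)
#   centered = sub_center_joint(data, silient=True)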
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_center_of_mass_allies(self,obs):",
"def transform(self, joint):\n num_dims = len(joint.shape) \n joint_axes = (colon,) * num_dims + (None, None)\n mask_axes = (None,)*(num_dims-1) + (colon, colon, colon)\n prefix = (joint[joint_axes] * self.mask[mask_axes]).sum(axis=-3)\n return prefix / prefix.sum()",
"def center_protein(traj, inplace=True):\n create_bonds(traj.topology)\n return traj.image_molecules(inplace=inplace, make_whole=True)",
"def gen_center(T, y):\r\n T_pos = [T[i] for i in range(len(y)) if y[i] == 1]\r\n C = np.mean(T_pos, 0).reshape(1, -1)\r\n return C",
"def recenter(self):\n self.centroid = self.consensus()\n return self.centroid",
"def get_center_of_mass_enemies(self,obs):",
"def _centre(self, period):\n if self.direction():\n mx = self.data[-1]\n else:\n mx = self.data[0]\n\n return ((mx // period) * period).squeeze()",
"def exmid2(a, s):\n\tcx=a.shape[0]/2\n\tcy=a.shape[1]/2\n\treturn a[cx-s-1:cx+s, cy-s-1:cy+s]",
"def hamiltonian_sub(self, s, N_s = 10, starting_at = 0):\n if N_s == self.N and starting_at==0:\n return self.hamiltonian(s[:3*self.N])\n s1 = s.copy()\n N = self.N\n s1 = spinlib.cycle3D(s, N, -starting_at)\n s1[N_s:N].fill(0)\n s1[N+N_s:2*N].fill(0)\n s1[2*N+N_s:].fill(0)\n return self.hamiltonian(s1[:3*self.N])",
"def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx",
"def mask_center_label ( gray ) :\n\n assert gray is not None\n\n # s = ndimage.generate_binary_structure(2,2) # iterate structure\n label_im, nb_labels = label(gray)\n\n # get center label\n\n h = label_im.shape[0]\n w = label_im.shape[1]\n\n l = label_im [h//2,w//2]\n\n gray [ label_im == l ] = 255\n gray [ label_im != l ] = 0\n\n return gray",
"def middle(self):\n return self.start.point_at_distance(self.length / 2, self.heading)",
"def center_of_mass_polyhedron():\n raise NotImplementedError",
"def middle(self):\n return self.point_and_heading_at_offset(self.length/2)",
"def centroid(self, unit='spatial'):\n com = ndimage.center_of_mass(self.data)\n if unit != 'spatial':\n return com\n else:\n # tuple - cast from generator\n # sample spacing - indices to units\n # x-c -- index shifted from center\n return tuple(self.sample_spacing * (x-c) for x, c in zip(com, (self.center_y, self.center_x)))",
"def narration_target(self):",
"def robot6_sphericalwrist_invkin(robot, desired_pose, last_joints = None):\n \n \n \n R06 = desired_pose.R\n p0T = desired_pose.p\n \n if robot.R_tool is not None and robot.p_tool is not None:\n R06 = R06.dot(np.transpose(robot.R_tool))\n p0T = p0T - R06.dot(robot.p_tool)\n \n H = robot.H\n P = robot.P\n \n theta_v = []\n \n #Correct for spherical joint position vectors\n if not np.all(P[:,4] == 0):\n P4_d = P[:,4].dot(H[:,3])\n assert np.all(P[:,4] - P4_d*H[:,3] == 0)\n P[:,3] += P[:,4]\n P[:,4] = np.zeros(3)\n \n if not np.all(P[:,5] == 0):\n P5_d = P[:,5].dot(H[:,5])\n assert np.all(P[:,5] - P5_d*H[:,5] == 0)\n P[:,6] += P[:,5]\n P[:,5] = np.zeros(3) \n \n d1 = np.dot(ey, P[:,1] + P[:,2] + P[:,3])\n v1 = p0T - R06.dot(P[:,6]) \n p1 = ey\n \n Q1 = rox.subproblem4(p1, v1, -H[:,0], d1)\n \n normalize = normalize_joints(robot, last_joints)\n \n for q1 in normalize(0, Q1):\n \n R01=rox.rot(H[:,0], q1)\n \n p26_q1 = R01.T.dot(p0T - R06.dot(P[:,6])) - (P[:,0] + P[:,1])\n \n d3 = np.linalg.norm(p26_q1)\n v3 = P[:,2] \n p3 = P[:,3]\n Q3 = rox.subproblem3(p3, v3, H[:,2], d3)\n \n for q3 in normalize(2,Q3):\n \n R23=rox.rot(H[:,2],q3)\n \n v2 = p26_q1 \n p2 = P[:,2] + R23.dot(P[:,3])\n q2 = rox.subproblem1(p2, v2, H[:,1])\n \n q2 = normalize(1, [q2])\n if len(q2) == 0:\n continue\n q2 = q2[0] \n \n R12 = rox.rot(H[:,1], q2)\n \n R03 = R01.dot(R12).dot(R23)\n \n R36 = R03.T.dot(R06)\n \n v4 = R36.dot(H[:,5]) \n p4 = H[:,5]\n \n Q4_Q5 = rox.subproblem2(p4, v4, H[:,3], H[:,4])\n \n for q4, q5 in normalize((3,4), Q4_Q5):\n \n R35 = rox.rot(H[:,3], q4).dot(rox.rot(H[:,4], q5))\n R05 = R03.dot(R35)\n R56 = R05.T.dot(R06)\n \n p6 = H[:,4]\n v6 = R56.dot(H[:,4])\n \n q6 = rox.subproblem1(p6, v6, H[:,5])\n \n q6 = normalize(5, [q6])\n if len(q6) == 0:\n continue\n q6 = q6[0]\n \n theta_v.append(np.array([q1, q2, q3, q4, q5, q6])) \n if last_joints is not None:\n theta_dist = np.linalg.norm(np.subtract(theta_v,last_joints), axis=1)\n return [theta_v[i] for i in list(np.argsort(theta_dist))]\n else:\n return theta_v",
"def center_barrier( self, verbose=False ):\n reactant_indicator, product_indicator = self.get_basin_indicators(self.init_path)\n n_react = np.sum(reactant_indicator)\n n_prod = np.sum(product_indicator)\n diff = np.abs(n_react-n_prod)\n delta = int(diff/2)\n basin = \"\"\n if ( n_react > n_prod ):\n # Remove the first slices from the reactant side\n self.init_path[\"energy\"] = self.init_path[\"energy\"][delta:]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"][delta:]\n self.nuc_mc.set_state( self.init_path[\"symbols\"][-1] )\n self.nuc_mc.current_energy = self.init_path[\"energy\"][-1]\n basin = \"product\"\n elif ( n_prod > n_react ):\n # Remove the last slices from the product side\n self.init_path[\"energy\"] = self.init_path[\"energy\"][:-delta]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"][:-delta]\n self.nuc_mc.set_state( self.init_path[\"symbols\"][0] )\n self.nuc_mc.current_energy = self.init_path[\"energy\"][0]\n basin = \"reactant\"\n\n new_path = {\"symbols\":[], \"energy\":[]}\n for i in range(delta):\n self.nuc_mc.network.reset()\n self.nuc_mc.sweep(nsteps=self.nsteps_per_sweep)\n self.nuc_mc.network(None)\n print(self.nuc_mc.network.get_statistics())\n new_path[\"energy\"].append(self.nuc_mc.current_energy)\n new_path[\"symbols\"].append( [atom.symbol for atom in self.nuc_mc.atoms] )\n\n if basin == \"reactant\":\n if not self.nuc_mc.is_reactant():\n raise RuntimeError(\"System leaving reactants, when starting inside the basin!\")\n elif basin == \"product\":\n if not self.nuc_mc.is_product():\n raise RuntimeError(\"System leaving products when starting inside basin!\")\n\n if basin == \"reactant\":\n self.log(\"Inserting {} states in the beginning of the trajectory\".format(delta))\n self.init_path[\"energy\"] = new_path[\"energy\"][::-1]+self.init_path[\"energy\"]\n self.init_path[\"symbols\"] = new_path[\"symbols\"][::-1]+self.init_path[\"symbols\"]\n else:\n self.init_path[\"energy\"] = self.init_path[\"energy\"]+new_path[\"energy\"]\n self.init_path[\"symbols\"] = self.init_path[\"symbols\"]+new_path[\"symbols\"]\n self.log(\"Appending {} states to the end of the trajectory\".format(delta))",
"def get_middle_joint(joint_a: Joint2D, joint_b: Joint2D) -> Joint2D:\n if not joint_a.is_set or not joint_b.is_set:\n return None\n visibility: JointVisibility\n if joint_a.visibility == JointVisibility.VISIBLE and joint_b.visibility == JointVisibility.VISIBLE:\n visibility = JointVisibility.VISIBLE\n elif joint_a.visibility == JointVisibility.INVISIBLE or joint_b.visibility == JointVisibility.INVISIBLE:\n visibility = JointVisibility.INVISIBLE\n elif joint_a.visibility == JointVisibility.ABSENT or joint_b.visibility == JointVisibility.ABSENT:\n visibility = JointVisibility.ABSENT\n\n return Joint2D(\n x=((joint_a.x + joint_b.x) / 2),\n y=((joint_a.y + joint_b.y) / 2),\n score=(joint_a.score + joint_b.score) / 2,\n visibility=visibility\n )",
"def JointDistn(self):\r\n if len(self.factors)==1:\r\n return self.factors[0]\r\n F = self.factors[0]\r\n for i in range(1,len(self.factors)):\r\n F = F * self.factors[i]\r\n self.JDist = F\r\n return F",
"def dist_from_center_to(x):\n pass",
"def rel_kin(self, joints): # kinematic term\n order1 = [9, 5, 20, 1, 2]\n order2 = [8, 6, 4, 20, 3] # joints' order\n order3 = [10, 4, 8, 0, 20]\n refer1 = [5, 6, 4, 2, 0] # kinseg's order\n refer2 = [6, 5, 4, 3, 1]\n\n segrel = defaultdict(lambda: int(0))\n result = []\n cnts = np.zeros(21)\n\n for i in xrange(len(order1)):\n A = np.array([joints[order1[i]].Position.x, joints[order1[i]].Position.y, joints[order1[i]].Position.z])\n B = np.array([joints[order2[i]].Position.x, joints[order2[i]].Position.y, joints[order2[i]].Position.z])\n C = np.array([joints[order3[i]].Position.x, joints[order3[i]].Position.y, joints[order3[i]].Position.z])\n\n tmp = min(np.abs(np.linalg.norm(A-B)*100-self.kinseg[refer1[i]])/self.kinseg[refer1[i]], 1)\n segrel[order1[i]] += tmp\n segrel[order2[i]] += tmp\n\n tmp = min(np.abs(np.linalg.norm(A-C)*100-self.kinseg[refer2[i]])/self.kinseg[refer2[i]], 1)\n segrel[order1[i]] += tmp\n segrel[order3[i]] += tmp\n\n cnts[order1[i]] += 2\n cnts[order2[i]] += 1\n cnts[order3[i]] += 1\n\n for i in self.trg_jorder:\n result.append(1-(segrel[i]/cnts[i]))\n\n return result",
"def center(x):\n return x - x.mean()",
"def _infer_extremity_joint_centers(\n markers: TimeSeries) -> TimeSeries:\n output = markers.copy(copy_data=False, copy_data_info=False)\n try:\n output.data['ElbowJointCenterR'] = 0.5 * (\n markers.data['LateralHumeralEpicondyleR']\n + markers.data['MedialHumeralEpicondyleR'])\n except KeyError:\n pass\n\n try:\n output.data['ElbowJointCenterL'] = 0.5 * (\n markers.data['LateralHumeralEpicondyleL']\n + markers.data['MedialHumeralEpicondyleL'])\n except KeyError:\n pass\n\n try:\n output.data['KneeJointCenterR'] = 0.5 * (\n markers.data['LateralFemoralEpicondyleR']\n + markers.data['MedialFemoralEpicondyleR'])\n except KeyError:\n pass\n\n try:\n output.data['KneeJointCenterL'] = 0.5 * (\n markers.data['LateralFemoralEpicondyleL']\n + markers.data['MedialFemoralEpicondyleL'])\n except KeyError:\n pass\n\n try:\n output.data['WristJointCenterR'] = 0.5 * (\n markers.data['RadialStyloidR']\n + markers.data['UlnarStyloidR'])\n except KeyError:\n pass\n\n try:\n output.data['WristJointCenterL'] = 0.5 * (\n markers.data['RadialStyloidL']\n + markers.data['UlnarStyloidL'])\n except KeyError:\n pass\n\n try:\n output.data['AnkleJointCenterR'] = 0.5 * (\n markers.data['LateralMalleolusR']\n + markers.data['MedialMalleolusR'])\n except KeyError:\n pass\n\n try:\n output.data['AnkleJointCenterL'] = 0.5 * (\n markers.data['LateralMalleolusL']\n + markers.data['MedialMalleolusL'])\n except KeyError:\n pass\n\n return output",
"def pro_avfid_superoperator_compsubspace(U,L1):\n\n if U.type=='oper':\n inner = U.dag()*U_target\n part_idx = [0, 1, 3, 4] # only computational subspace\n ptrace = 0\n for i in part_idx:\n ptrace += inner[i, i]\n dim = 4 # 2 qubits comp subspace \n\n return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))\n\n elif U.type=='super':\n kraus_form = qtp.to_kraus(U)\n dim=4 # 2 qubits in the computational subspace\n part_idx = [0, 1, 3, 4] # only computational subspace\n psum=0\n for A_k in kraus_form:\n ptrace = 0\n inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch\n for i in part_idx:\n ptrace += inner[i, i]\n psum += (np.abs(ptrace))**2\n\n return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))",
"def principal_strain(strain_tensor_data, k, sample_ID, initial_step, ch_list):\n\n\n k = str(k)\n it = int(initial_step)\n dir = [\"xx\",\"yy\",\"zz\",\"xy\",\"yz\",\"zx\"]\n ch = ch_list.loc[\"ch\",:]\n\n\n\n \"\"\" ~~~~~~~~~~input from data file~~~~~~~~~~~~~~~~~ \"\"\"\n\n sdata = strain_tensor_data\n time_p = sdata.loc[:,\"Elapsed Time\"] \n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n time_n = time_p.values\n t = len(sdata.index)\n\n\n \"\"\" ~~~~~~~~~~Create strain tensor ~~~~~~~~~~~~~~~~~ \"\"\"\n\n stensor = np.empty((t,3,3))\n for i in range(0,t):\n strain = sdata.loc[i+1, dir]\n\n s1 = strain.at[\"xx\"]\n s2 = strain.at[\"xy\"]\n s3 = strain.at[\"zx\"]\n s4 = strain.at[\"yy\"]\n s5 = strain.at[\"yz\"]\n s6 = strain.at[\"zz\"]\n\n stensor[i,:,:] = np.array([[s1,s2,s3],\n [s2,s4,s5],\n [s3,s5,s6]])\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n w,v = LA.eigh(stensor) #calculate eigen vectors & eigenvalues\n\n\n \"\"\" ~~~~~~~~~~ Output data ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n time = time_n[it:]\n\n w = w[it:,:]\n v = v[it:,:,:]\n\n\n v1 = v[:,:,2]\n v2 = v[:,:,1]\n v3 = v[:,:,0]\n\n\n w_ave = np.mean(w, axis=0)\n v_ave = np.mean(v, axis=0)\n\n v1_ave = v_ave[:,2]\n v2_ave = v_ave[:,1]\n v3_ave = v_ave[:,0]\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n def plunge_trend(n):\n \n norm = np.linalg.norm(n)\n n = n/norm\n \n x = n[0]\n y = n[1]\n z = n[2]\n \n plunge = np.arcsin(z) \n \n if x == 0 and y > 0:\n trend = pi*0.5\n elif x == 0 and y < 0:\n trend = pi*1.5\n elif x > 0 and y == 0:\n trend = 0\n elif x < 0 and y == 0:\n trend = pi\n elif x == 0 and y == 0:\n trend = 0\n else:\n trend = np.arctan(abs(y/x))\n \n if x > 0 and y>0:\n trend = trend \n elif x > 0 and y< 0:\n trend = 2*pi - trend\n elif x <0 and y <0:\n trend = 1.5*pi - trend\n elif x <0 and y >0:\n trend = trend + 0.5*pi\n \n plunge = np.rad2deg(plunge)\n trend = np.rad2deg(trend)\n return plunge, trend\n\n\n def plot_schmidt(ax, plunge, trend, style, label = \"\", markersize = 30, alpha = 1):\n if plunge >= 0:\n ax.line(plunge, trend, style,label = label, markersize = markersize, alpha = alpha)\n elif plunge < 0:\n ax.line(-plunge, trend, style,label = label, markerfacecolor = \"#ffffff\", markersize = markersize, alpha = alpha)\n\n\n fig = plt.figure(figsize=(30,30))\n ax = fig.add_subplot(3,1,1,projection=\"stereonet\")\n ax.set_azimuth_ticklabels([\"N\",\"\",\"E\",\"\",\"S\",\"\",\"W\"])\n ax.grid(which=\"both\")\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n for i in range(1, len(time)):\n plunge111, trend111 = plunge_trend(v1[i,:])\n plot_schmidt(ax,plunge111,trend111, \"ro\", markersize=5)\n\n plunge112, trend112 = plunge_trend(v2[i,:])\n plot_schmidt(ax,plunge112,trend112, \"go\", markersize=5)\n\n plunge113, trend113 = plunge_trend(v3[i,:])\n plot_schmidt(ax,plunge113,trend113, \"bo\", markersize=5)\n\n\n plunge1, trend1 = plunge_trend(v1[0,:])\n plot_schmidt(ax,plunge1,trend1, \"r^\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[0,:])\n plot_schmidt(ax,plunge2,trend2, \"g^\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[0,:])\n plot_schmidt(ax,plunge3,trend3, \"b^\",markersize =20)\n\n\n plunge1, trend1 = plunge_trend(v1[-1,:])\n plot_schmidt(ax,plunge1,trend1, \"ro\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[-1,:])\n plot_schmidt(ax,plunge2,trend2, \"go\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[-1,:])\n 
plot_schmidt(ax,plunge3,trend3, \"bo\",markersize =20)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of averaged principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n plunge1, trend1 = plunge_trend(v1_ave)\n plot_schmidt(ax,plunge1,trend1, \"r*\",markersize =20, label = \"$\\sigma_1$\")\n\n plunge2, trend2 = plunge_trend(v2_ave)\n plot_schmidt(ax,plunge2,trend2, \"g*\",markersize =20,label = \"$\\sigma_2$\")\n\n plunge3, trend3 = plunge_trend(v3_ave)\n plot_schmidt(ax,plunge3,trend3, \"b*\", markersize =20,label = \"$\\sigma_3$\")\n\n ax.legend(bbox_to_anchor = (1.2, 1), loc=\"upper left\")\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n \n fig.text(0.15,0.7,ch)\n\n\n \"\"\" ~~~~~~~~~~ Plot of max & min horizontal strain directions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n \n zr = np.empty((360,1))\n for i in range(0,360):\n th_deg = i\n th = th_deg*pi*180**(-1) \n\n vector = np.array([[np.cos(th)],[np.sin(th)],[0]])\n sstensor = stensor[-1,:,:]\n z = sstensor.dot(vector)\n zz = vector.T.dot(z)\n zr[i] = zz\n\n th_max = zr.argmax()\n th_min = zr.argmin()\n\n #th_max = th_max*pi*180**(-1) \n #th_min = th_min*pi*180**(-1) \n\n #n_max_1 = np.array([[np.cos(th_max)],[np.sin(th_max)],[0]])\n #n_max_2 = np.array([[np.cos(th_max+pi)],[np.sin(th_max+pi)],[0]])\n\n #n_min_1 = np.array([[np.cos(th_min)],[np.sin(th_min)],[0]])\n #n_min_2 = np.array([[np.cos(th_min+pi)],[np.sin(th_min+pi)],[0]])\n\n plunge11, trend11 = 0, th_max\n plunge12, trend12 = 0, th_max+180\n #plunge11, trend11 = plunge_trend(n_max_1)\n #plunge12, trend12 = plunge_trend(n_max_2)\n plot_schmidt(ax,plunge11,trend11, \"rD\",markersize =30)\n plot_schmidt(ax,plunge12,trend12, \"rD\",markersize =30)\n\n plunge22, trend22 = 0, th_min\n plunge23, trend23 = 0, th_min + 180\n #plunge22, trend22 = plunge_trend(n_min_1)\n #plunge23, trend23 = plunge_trend(n_min_2)\n plot_schmidt(ax,plunge22,trend22, \"bD\",markersize =30)\n plot_schmidt(ax,plunge23,trend23, \"bD\",markersize =30)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax1 = fig.add_subplot(3,1,2)\n w1 = w[:,2]-w[0,2]\n w2 = w[:,1]-w[0,1]\n w3 = w[:,0]-w[0,0]\n time = time[:]-time[0]\n\n\n ax1.plot(time,w1,label=\"$\\epsilon_1$\")\n ax1.plot(time,w2,label=\"$\\epsilon_2$\")\n ax1.plot(time,w3,label=\"$\\epsilon_3$\")\n ax1.set(xlabel=\"Elapsed Time[h]\",ylabel=\"Strain[$\\mu$strain]\")\n ax1.legend()\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ratios ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax2 = fig.add_subplot(3,1,3)\n w1 = w1[1:]\n w2 = w2[1:]\n w3 = w3[1:]\n time1 = time[1:]\n \n w21 = w2/w1\n w31 = w3/w1\n\n ax2.plot(time1,w21,label=\"$\\epsilon_2$/$\\epsilon_1$\")\n ax2.plot(time1,w31,label=\"$\\epsilon_3$/$\\epsilon_1$\")\n ax2.set(xlabel=\"Elapsed Time[h]\")\n ax2.legend()\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n fig.suptitle(sample_ID+\"_\"+k,fontsize=\"large\", fontweight=\"bold\")\n fig.savefig(\"result_\"+sample_ID+\"_\"+k+\".png\")\n 
plt.close(fig)\n\n return w, v",
"def cutout(self, centre, radius):",
"def center(self):\n return np.array([0,0,1/self.C+self.pos()])",
"def segment_normal(end, start):\n return rotate90cw(unit(make_vector(end, start)))",
"def get_clust_cent(self):\r\n\r\n return self.__clust_cent"
] |
[
"0.5900161",
"0.5705653",
"0.53400606",
"0.5208176",
"0.519333",
"0.5152531",
"0.50909877",
"0.5073647",
"0.50445163",
"0.50417227",
"0.5018438",
"0.50131714",
"0.4979352",
"0.49765232",
"0.4962862",
"0.49624056",
"0.4957071",
"0.49393547",
"0.49254608",
"0.49253595",
"0.4923363",
"0.49067426",
"0.49060497",
"0.49044552",
"0.48948497",
"0.4882386",
"0.48690924",
"0.4859305",
"0.4848745",
"0.4846786"
] |
0.6629508
|
0
|
Make the bone between the right shoulder (joint 8) and the left shoulder (joint 4) of the first person parallel to the x axis.
|
def align_horizontal(data: np.ndarray, xaxis=[8, 4], silient=False) -> None:
for i_s, skeleton in enumerate(tqdm(data, disable=silient)):
if skeleton.sum() == 0:
continue
joint_rshoulder = skeleton[0, 0, xaxis[0]]
joint_lshoulder = skeleton[0, 0, xaxis[1]]
axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0])
angle = get_angle_between(joint_rshoulder - joint_lshoulder, [1, 0, 0])
matrix_x = rotate_matrix(axis, angle)
for i_p, person in enumerate(skeleton):
if person.sum() == 0:
continue
for i_f, frame in enumerate(person):
if frame.sum() == 0:
continue
for i_j, joint in enumerate(frame):
data[i_s, i_p, i_f, i_j] = np.dot(matrix_x, joint)
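# Helpers assumed by this snippet but not defined in this row; minimal sketches
# (assumptions, not the original project's code):
#   def get_angle_between(v1, v2):
#       u1, u2 = v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2)
#       return np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))
#   rotate_matrix(axis, angle) is taken to return the 3x3 Rodrigues rotation
#   matrix for a rotation of `angle` radians about `axis`.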
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def align_bone_x_axis(edit_bone, new_x_axis):\n new_x_axis = new_x_axis.cross(edit_bone.y_axis)\n new_x_axis.normalize()\n dot = max(-1.0, min(1.0, edit_bone.z_axis.dot(new_x_axis)))\n angle = math.acos(dot)\n edit_bone.roll += angle\n dot1 = edit_bone.z_axis.dot(new_x_axis)\n edit_bone.roll -= angle * 2.0\n dot2 = edit_bone.z_axis.dot(new_x_axis)\n if dot1 > dot2:\n edit_bone.roll += angle * 2.0",
"def forward(self, x):\r\n self.x = (self.x+(x*(math.cos(self.dir))))\r\n self.y = (self.y+(x*(math.sin(self.dir))))\r\n return (self.x, self.y)",
"def give(r):\n r.rotate(\"r_shoulder_pan_joint\", 0.5)\n time.sleep(2)\n r.rotate(\"r_shoulder_lift_joint\", -1.0)\n time.sleep(2)\n r.rotate(\"r_elbow_flex_joint\", 1.8)\n time.sleep(2)",
"def get_joint_positions(self, joint_angles ): \n\n\n # current angles\n res_joint_angles = joint_angles.copy() \n\n # detect limits\n maskminus= res_joint_angles > self.joint_lims[:,0]\n maskplus = res_joint_angles < self.joint_lims[:,1]\n \n res_joint_angles = res_joint_angles*(maskplus*maskminus) \n res_joint_angles += self.joint_lims[:,0]*(np.logical_not(maskminus) )\n res_joint_angles += self.joint_lims[:,1]*(np.logical_not(maskplus) )\n \n # mirror\n if self.mirror :\n res_joint_angles = -res_joint_angles\n res_joint_angles[0] += np.pi \n \n # calculate x coords of arm edges.\n # the x-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n x = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.cos( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # trabslate to the x origin \n x = np.hstack([self.origin[0], x+self.origin[0]])\n\n # calculate y coords of arm edges.\n # the y-axis position of each edge is the \n # sum of its projection on the x-axis\n # and all the projections of the \n # previous edges \n y = np.array([ \n sum([ \n self.segment_lengths[k] *\n np.sin( (res_joint_angles[:(k+1)]).sum() ) \n for k in range(j+1) \n ])\n for j in range(self.number_of_joint) \n ])\n \n # translate to the y origin \n y = np.hstack([self.origin[1], y+self.origin[1]])\n\n pos = np.array([x, y]).T\n \n return (pos, res_joint_angles)",
"def FK_pox(joint_angles, m_mat, s_lst):\n pass",
"def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y",
"def get_first_quadrant(self):\n num_copies_x = ceil(self.max_x / self.room_x)\n num_copies_x = int(num_copies_x)\n num_copies_y = ceil(self.max_y / self.room_y)\n num_copies_y = int(num_copies_y)\n\n player_exp_x = []\n player_exp_y = []\n guard_exp_x = []\n guard_exp_y = []\n # Loop expands along the x axis\n for i in range(0, num_copies_x + 1, 1):\n temp_player_y_list = []\n temp_guard_y_list = []\n r_x = self.room_x * i\n\n if len(player_exp_x) == 0:\n n_p_p_x = self.player_x\n else:\n n_p_p_x = (r_x - player_exp_x[-1][0]) + r_x\n player_exp_x.append([n_p_p_x, self.player_y, 1])\n\n if len(guard_exp_x) == 0:\n n_g_p_x = self.guard_x\n else:\n n_g_p_x = (r_x - guard_exp_x[-1][0]) + r_x\n guard_exp_x.append([n_g_p_x, self.guard_y, 7])\n\n # Loop expands along the x axis\n for j in range(1, num_copies_y + 1, 1):\n r_y = self.room_y * j\n if len(temp_guard_y_list) == 0:\n n_g_p_y = (r_y - self.guard_y) + r_y\n temp_guard_y_list.append(n_g_p_y)\n else:\n n_g_p_y = (r_y - temp_guard_y_list[-1]) + r_y\n temp_guard_y_list.append(n_g_p_y)\n guard_exp_y.append([n_g_p_x, n_g_p_y, 7])\n\n if len(temp_player_y_list) == 0:\n n_p_p_y = (r_y - self.player_y) + r_y\n temp_player_y_list.append(n_p_p_y)\n else:\n n_p_p_y = (r_y - temp_player_y_list[-1]) + r_y\n temp_player_y_list.append(n_p_p_y)\n player_exp_y.append([n_p_p_x, n_p_p_y, 1])\n\n return player_exp_x + guard_exp_x + player_exp_y + guard_exp_y",
"def botStack_x(self):\r\n self.x_stack=self.img.shape[2]-1\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))",
"def line():\n tt.left(90)\n tt.down()\n tt.forward(50)\n tt.up()\n tt.right(90)\n tt.forward(10)\n tt.right(90)\n tt.forward(50)\n tt.left(90)",
"def rotateLeft(self):\n self.faceHeading+=shipRotationSpeed\n self.reDraw()",
"def CCframe(x1, x3, xy = 0, alpha = 1.0):\n\n # Postitions of the joints\n alpha = str(alpha)\n J0= 0+0j + xy\n J1= 1*exp(1j*x1) + xy\n J2= J1+0.8*exp(1j*(x1+x3))\n pl.plot(r_[J0,].real, r_[J0,].imag, 'ks', color = alpha, ms = 8)\n pl.plot(r_[J0, J1].real, r_[J0, J1].imag, 'k-', color = alpha, lw=3)\n pl.plot(r_[J2, J1].real, r_[J2, J1].imag, 'ko-', color = alpha)\n pl.xticks= []\n pl.yticks= []\n pl.axis('equal')\n\n xmin = np.min(r_[J0, J1, J2].real)\n xmax = np.max(r_[J0, J1, J2].real)\n\n ymin = np.min(r_[J0, J1, J2].imag)\n ymax = np.max(r_[J0, J1, J2].imag)",
"def _forward_kinematics(self, body_name):\n p = self.dynsim.data.get_body_xpos(body_name)\n r = Quaternion(matrix=self.dynsim.data.get_body_xmat(body_name))\n\n return p, r",
"def sprinkler(l):\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)\n t.left(90)\n t.forward(l / 2)\n t.right(90)\n t.forward(l)\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)",
"def left(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], speed,\n ONE_SHOT_MODE)",
"def baxter_forward_kinematics_from_joint_state(joint_state):\n \n\tangles = np.zeros(7)\n\tangles = joint_state.position[2:9]\n\treturn baxter_forward_kinematics_from_angles(angles)\n\t\n\tprint(baxter_forward_kinematics_from_angles(angles))",
"def move_north(self):\n self.vertical = (self.vertical * 2)[1:5]\n self.horizontal[1] = self.vertical[0]\n self.horizontal[3] = self.vertical[2]",
"def inv_kin(self, xy):\n\n def distance_to_default(q, *args): \n \"\"\"Objective function to minimize\n Calculates the euclidean distance through joint space to the default\n arm configuration. The weight list allows the penalty of each joint \n being away from the resting position to be scaled differently, such\n that the arm tries to stay closer to resting state more for higher \n weighted joints than those with a lower weight.\n \n :param list q: the list of current joint angles\n :returns scalar: euclidean distance to the default arm position\n \"\"\"\n # weights found with trial and error, get some wrist bend, but not much\n weight = [1, 1, 1.3, 1] \n return np.sqrt(np.sum([(qi - q0i)**2 * wi\n for qi,q0i,wi in zip(q, self.q0, weight)]))\n\n def x_constraint(q, xy):\n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired x position\n \"\"\"\n x = ( self.L[0]*np.cos(q[0]) + self.L[1]*np.cos(q[0]+q[1]) + \n self.L[2]*np.cos(q[0]+q[1]+q[2]) + self.L[3]*np.cos(np.sum(q)) ) - xy[0]\n return x\n\n def y_constraint(q, xy): \n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired y position\n \"\"\"\n y = ( self.L[0]*np.sin(q[0]) + self.L[1]*np.sin(q[0]+q[1]) + \n self.L[2]*np.sin(q[0]+q[1]+q[2]) + self.L[3]*np.sin(np.sum(q)) ) - xy[1]\n return y\n\n return scipy.optimize.fmin_slsqp( func=distance_to_default, \n x0=self.q, eqcons=[x_constraint, y_constraint], \n args=(xy,), iprint=0) # iprint=0 suppresses output",
"def move_left(self):\n self.yaw_motor.step_backward()",
"def arm(self):\n pass",
"def sidebounce(self):\r\n self.dx=-self.dx",
"def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix",
"def parallel_to_serial_joint_angles(self, joint_matrix):\n temp = joint_matrix\n temp[2, :] -= joint_matrix[1, :]\n return temp",
"def MoveLeftStep(self):\n if self.facing == 0:\n self.facing = 3\n self.x -= self.stepLeft\n elif self.facing == 1:\n self.facing = 0\n self.y -= self.stepUp\n elif self.facing == 2:\n self.facing = 1\n self.x += self.stepRight\n elif self.facing == 3:\n self.facing = 2\n self.y += self.stepDown",
"def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result",
"def stokes_horizontal():\n return np.array([1, 1, 0, 0])",
"def xx(self):\n return self.exterior[:, 0]",
"def leftTurn(self):\n #print('leftTurn\\r')\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=1.0)",
"def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)",
"def x(self):\n return self._turtle.xcor()",
"def x(self):\n return self._turtle.xcor()"
] |
[
"0.58034986",
"0.54752916",
"0.54640716",
"0.5453321",
"0.5449038",
"0.5423321",
"0.5419779",
"0.53930056",
"0.5369793",
"0.536415",
"0.53400546",
"0.53232163",
"0.5317551",
"0.52692205",
"0.52663136",
"0.5261364",
"0.52119005",
"0.5207496",
"0.5202532",
"0.5192694",
"0.517984",
"0.51769227",
"0.51644856",
"0.51505494",
"0.5146476",
"0.5135659",
"0.51341534",
"0.51286405",
"0.5119487",
"0.5119487"
] |
0.5684662
|
1
|
Get load balancer related models. The models are stored in a SimpleNamespace object and can be accessed with the dot operator, e.g. `load_balancer_models.ManagedClusterLoadBalancerProfile`.
|
def load_balancer_models(self) -> SimpleNamespace:
if self.__loadbalancer_models is None:
load_balancer_models = {}
load_balancer_models["ManagedClusterLoadBalancerProfile"] = self.ManagedClusterLoadBalancerProfile
load_balancer_models[
"ManagedClusterLoadBalancerProfileManagedOutboundIPs"
] = self.ManagedClusterLoadBalancerProfileManagedOutboundIPs
load_balancer_models[
"ManagedClusterLoadBalancerProfileOutboundIPs"
] = self.ManagedClusterLoadBalancerProfileOutboundIPs
load_balancer_models[
"ManagedClusterLoadBalancerProfileOutboundIPPrefixes"
] = self.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
load_balancer_models["ResourceReference"] = self.ResourceReference
self.__loadbalancer_models = SimpleNamespace(**load_balancer_models)
return self.__loadbalancer_models
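
A self-contained sketch of the lazy-initialization pattern this document field uses: build the SimpleNamespace once, cache it in a private attribute, and hand back the cached object on every later access. The holder class and the stubbed model classes below are illustrative, not the actual azure-cli types.

from types import SimpleNamespace

class ModelHolder:
    # Illustrative stand-in for the models class this record is drawn from.
    def __init__(self):
        self.__lb_models = None

    @property
    def load_balancer_models(self) -> SimpleNamespace:
        # Build the namespace on first access, then reuse the cached object.
        if self.__lb_models is None:
            self.__lb_models = SimpleNamespace(
                ManagedClusterLoadBalancerProfile=object,  # real code stores SDK model classes
                ResourceReference=object,
            )
        return self.__lb_models

holder = ModelHolder()
assert holder.load_balancer_models is holder.load_balancer_models  # same cached namespace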
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_models(self):\n self.load()\n return self._models",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models",
"def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models",
"def models(self):\n return self._base.classes",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def models(self):\n return self.config.models()",
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models",
"def generate_models():\n model_names = [\"MLPClassifier\", \"AdaBoostClassifier\", \"SVC\",\n \"KNeighborsClassifier\", \"GaussianProcessClassifier\", \"GaussianNB\",\n \"QuadraticDiscriminantAnalysis\", \"DecisionTreeClassifier\", \"RandomForestClassifier\",\n \"MLPClassifier\"]\n models = [MLPClassifier(), AdaBoostClassifier(), SVC(),\n KNeighborsClassifier(), GaussianProcessClassifier(), GaussianNB(),\n QuadraticDiscriminantAnalysis(), DecisionTreeClassifier(), RandomForestClassifier()]\n models_and_names = zip(model_names, models)\n return models_and_names",
"def load_models():\n vectorizer = ModelStorage.objects.all().values_list(\"vectorizer\", flat = True)[0]\n classifier = ModelStorage.objects.all().values_list(\"classifier\", flat = True)[0]\n\n return vectorizer, classifier",
"def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)",
"def models() -> list[str]:\n return list(models_url.keys())",
"def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)",
"def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)",
"def get_related_models(self):\n\t\tmodels = []\n\t\tif not self.related_models:\n\t\t\treturn models\n\n\t\tfor model in self.related_overrides.get(self.related_override_key(), self.related_models):\n\t\t\ttry:\n\t\t\t\tgroup, model_path, extra_fields = model\n\t\t\texcept ValueError:\n\t\t\t\tgroup, model_path = model\n\t\t\t\textra_fields = ()\n\t\t\tapp_label, model_name = model_path.split('.')\n\t\t\tmodels.append((group, apps.get_model(app_label, model_name,), extra_fields, group.replace('_', ' ')))\n\n\t\treturn models",
"def models_from(module):\n return {model.__name__.lower(): model for model in vars(module).values() if is_model(model)}",
"def simple_active_models(self, label=None):\n # make the request to get active models from Razor\n \n am_content = self.active_models(label)\n\n #print json.dumps(am_content, indent=4)\n \n # Check the status code and return appropriately\n if 'response' in am_content.keys():\n active_models = {}\n for response in am_content['response']:\n\n # get info from razor about the active model\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/active_model/' + response['@uuid'], headers=headers)\n single_am_content = json.loads(r.content)\n #print json.dumps(single_am_content, indent=2)\n active_models[response['@uuid']] = self.build_simple_active_model(single_am_content)\n\n return active_models\n else:\n return 'Error in request, exited with status code: ' + str(r.status_code)",
"def models(self):\r\n return self.get_field('model')",
"def models(self):\r\n return self.get_field('model')",
"def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models",
"def get_labels(model):\n return model._labels",
"def get_labels(model):\n return model._labels",
"def get_labels(model):\n return model._labels",
"def get_labels(model):\n return model._labels",
"def models(self) -> t.List[Model]:\n _models: t.List[Model] = [\n item for item in self._deployables if isinstance(item, Model)\n ]\n return _models",
"def models(self) -> list[AbstractModel]:\n return self._models"
] |
[
"0.6290427",
"0.61453086",
"0.5930261",
"0.5809687",
"0.57820994",
"0.5760742",
"0.5676707",
"0.5666027",
"0.5642722",
"0.5629715",
"0.5624789",
"0.5556899",
"0.55357313",
"0.5533739",
"0.5526333",
"0.5447427",
"0.5439883",
"0.5436307",
"0.541895",
"0.54091656",
"0.54033613",
"0.5398941",
"0.5398941",
"0.53984624",
"0.5372203",
"0.5372203",
"0.5372203",
"0.5372203",
"0.53529465",
"0.5329243"
] |
0.7657893
|
0
|
Get NAT gateway related models. The models are stored in a SimpleNamespace object and can be accessed with the dot operator, e.g. `nat_gateway_models.ManagedClusterNATGatewayProfile`.
|
def nat_gateway_models(self) -> SimpleNamespace:
if self.__nat_gateway_models is None:
nat_gateway_models = {}
nat_gateway_models["ManagedClusterNATGatewayProfile"] = (
self.ManagedClusterNATGatewayProfile if hasattr(self, "ManagedClusterNATGatewayProfile") else None
) # backward compatibility
nat_gateway_models["ManagedClusterManagedOutboundIPProfile"] = (
self.ManagedClusterManagedOutboundIPProfile
if hasattr(self, "ManagedClusterManagedOutboundIPProfile")
else None
) # backward compatibility
self.__nat_gateway_models = SimpleNamespace(**nat_gateway_models)
return self.__nat_gateway_models
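
The hasattr guards above exist so the property keeps working against SDK versions that predate the NAT gateway models. A minimal runnable sketch of that backward-compatibility pattern, with an empty class standing in for an older SDK surface; getattr with a default is the idiomatic equivalent of the hasattr-or-None guard:

from types import SimpleNamespace

class OldSdk:
    pass  # stand-in for an SDK surface that lacks the newer model class

def load_optional_models(sdk) -> SimpleNamespace:
    # Fall back to None when the attribute is missing, instead of letting
    # an AttributeError escape on older SDK versions.
    return SimpleNamespace(
        ManagedClusterNATGatewayProfile=getattr(sdk, "ManagedClusterNATGatewayProfile", None),
    )

ns = load_optional_models(OldSdk())
assert ns.ManagedClusterNATGatewayProfile is None  # tolerated, not an error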
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def get_models(self):\n self.load()\n return self._models",
"def get_models():\n D = Discriminator(num_channels=params.num_channels,\n conv_dim=params.d_conv_dim,\n image_size=params.image_size,\n num_gpu=params.num_gpu,\n num_extra_layers=params.num_extra_layers,\n use_BN=False)\n G = Generator(num_channels=params.num_channels,\n z_dim=params.z_dim,\n conv_dim=params.g_conv_dim,\n image_size=params.image_size,\n num_gpu=params.num_gpu,\n num_extra_layers=params.num_extra_layers,\n use_BN=True)\n\n # init weights of models\n D.apply(init_weights)\n G.apply(init_weights)\n\n # restore model weights\n if params.d_model_restore is not None and \\\n os.path.exists(params.d_model_restore):\n D.load_state_dict(torch.load(params.d_model_restore))\n if params.g_model_restore is not None and \\\n os.path.exists(params.g_model_restore):\n G.load_state_dict(torch.load(params.g_model_restore))\n\n # check if cuda is available\n if torch.cuda.is_available():\n cudnn.benchmark = True\n D.cuda()\n G.cuda()\n\n print(D)\n print(G)\n\n return D, G",
"def gnn_model_dict():\n\n from .message_passing import agnnconv, econv, gatconv, meta, nnconv, nnconv_elu, nnconv_old\n\n models = {\n \"agnnconv\" : agnnconv.AGNNConvModel,\n \"econv\" : econv.EConvModel,\n \"gatconv\" : gatconv.GATConvModel,\n \"nnconv\" : nnconv.NNConvModel,\n \"meta\" : meta.MetaLayerModel,\n \"nnconv_elu\" : nnconv_elu.NNConvModel,\n \"nnconv_old\" : nnconv_old.NNConvModel\n }\n\n return models",
"def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models",
"def models(self):\n return self.config.models()",
"def _get_model(self):\n layers = []\n\n # inner / hidden network layers + non-linearities\n for l in self.network_layers:\n layers.append(Dense(l))\n layers.append(Relu)\n\n # output layer (no non-linearity)\n layers.append(Dense(self.output_dimension))\n \n # make jax stax object\n model = stax.serial(*layers)\n\n return model",
"def models(self):\n return self._base.classes",
"def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def load_balancer_models(self) -> SimpleNamespace:\n if self.__loadbalancer_models is None:\n load_balancer_models = {}\n load_balancer_models[\"ManagedClusterLoadBalancerProfile\"] = self.ManagedClusterLoadBalancerProfile\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileManagedOutboundIPs\"\n ] = self.ManagedClusterLoadBalancerProfileManagedOutboundIPs\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileOutboundIPs\"\n ] = self.ManagedClusterLoadBalancerProfileOutboundIPs\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileOutboundIPPrefixes\"\n ] = self.ManagedClusterLoadBalancerProfileOutboundIPPrefixes\n load_balancer_models[\"ResourceReference\"] = self.ResourceReference\n self.__loadbalancer_models = SimpleNamespace(**load_balancer_models)\n return self.__loadbalancer_models",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def load_model(gateway_name=None):\n if gateway_name and len(gateway_name) > 0:\n model = pk.load(open(\"models/\" + gateway_name + \"_model.pk\", \"r\"))\n else:\n model = pk.load(open(\"models/all_model.pk\", \"r\"))\n return model",
"def get_related_models(self):\n\t\tmodels = []\n\t\tif not self.related_models:\n\t\t\treturn models\n\n\t\tfor model in self.related_overrides.get(self.related_override_key(), self.related_models):\n\t\t\ttry:\n\t\t\t\tgroup, model_path, extra_fields = model\n\t\t\texcept ValueError:\n\t\t\t\tgroup, model_path = model\n\t\t\t\textra_fields = ()\n\t\t\tapp_label, model_name = model_path.split('.')\n\t\t\tmodels.append((group, apps.get_model(app_label, model_name,), extra_fields, group.replace('_', ' ')))\n\n\t\treturn models",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def availablemodels(self):\n return self.__models.keys()",
"def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)",
"def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models",
"def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models",
"def list_models(self, sort: bool = True, limit: int | None = None) -> Iterator[ExecutableModelSpace]:\n return self._strategy.list_models(sort=sort, limit=limit)",
"def get_intermediate_models(self):\n\n intermediate_models = []\n for mb_model in self.mb_model_list:\n fks = self.get_foreign_keys(mb_model)\n # add models with fields that are unique together\n if (\n len(fks) >= 2 and\n mb_model._meta.unique_together and\n mb_model._meta.db_table[-4:] != \"_raw\" and\n mb_model._meta.db_table[-8:] != \"_deleted\"\n ):\n intermediate_models.append(mb_model)\n # add models with a ForeignKey set to Link\n elif (\n len(fks) >= 3 and\n mb_models.Link in fks.values()\n ):\n intermediate_models.append(mb_model)\n return intermediate_models",
"def __get_network_routes(self):\n routes = []\n\n gws = netifaces.gateways()\n for k in gws.keys():\n if k == 'default':\n continue\n\n\t for r in gws[k]:\n (ip,interface,is_gateway) = r\n\n gw_name = \"{0}\".format(netifaces.address_families[k])\n\n routes.append({\n gw_name : {\n 'ip_address' : ip,\n 'interface' : interface,\n\t\t\t 'default' : is_gateway\n }\n \n }\n )\n\n return routes",
"def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)",
"def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models",
"def list_models():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_models\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)"
] |
[
"0.5768648",
"0.5745505",
"0.5712239",
"0.56414086",
"0.556433",
"0.5561573",
"0.5515015",
"0.5510445",
"0.54994595",
"0.5485388",
"0.54605484",
"0.5453052",
"0.5449938",
"0.53977805",
"0.5384797",
"0.53555644",
"0.5336726",
"0.53359085",
"0.5330911",
"0.53225255",
"0.5315613",
"0.5305768",
"0.5275818",
"0.5263911",
"0.5256657",
"0.52538353",
"0.5245642",
"0.52434164",
"0.5238589",
"0.52144885"
] |
0.7860511
|
0
|
Get maintenance configuration related models. The models are stored in a SimpleNamespace object and can be accessed with the dot operator, e.g. `maintenance_configuration_models.MaintenanceConfiguration`.
|
def maintenance_configuration_models(self) -> SimpleNamespace:
if self.__maintenance_configuration_models is None:
maintenance_configuration_models = {}
# getting maintenance configuration related models
maintenance_configuration_models["MaintenanceConfiguration"] = self.MaintenanceConfiguration
maintenance_configuration_models["MaintenanceConfigurationListResult"] = self.MaintenanceConfigurationListResult
maintenance_configuration_models["MaintenanceWindow"] = self.MaintenanceWindow
maintenance_configuration_models["Schedule"] = self.Schedule
maintenance_configuration_models["DailySchedule"] = self.DailySchedule
maintenance_configuration_models["WeeklySchedule"] = self.WeeklySchedule
maintenance_configuration_models["AbsoluteMonthlySchedule"] = self.AbsoluteMonthlySchedule
maintenance_configuration_models["RelativeMonthlySchedule"] = self.RelativeMonthlySchedule
maintenance_configuration_models["TimeSpan"] = self.TimeSpan
maintenance_configuration_models["TimeInWeek"] = self.TimeInWeek
self.__maintenance_configuration_models = SimpleNamespace(**maintenance_configuration_models)
return self.__maintenance_configuration_models
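
One reason for converting the dict to a SimpleNamespace at the end: call sites get attribute access that reads like ordinary module access. A tiny runnable demo with the model classes stubbed out by dict; the real namespace holds SDK classes such as MaintenanceWindow and WeeklySchedule, and the interval_weeks keyword is only an illustration, not a verified field name:

from types import SimpleNamespace

models = SimpleNamespace(MaintenanceWindow=dict, WeeklySchedule=dict)  # classes stubbed with dict
window = models.MaintenanceWindow(schedule=models.WeeklySchedule(interval_weeks=1))
print(window)  # {'schedule': {'interval_weeks': 1}}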
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def models(self):\n return self.config.models()",
"def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models",
"def get_models(self):\n self.load()\n return self._models",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)",
"def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]",
"def models(self):\n return self._base.classes",
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def load_balancer_models(self) -> SimpleNamespace:\n if self.__loadbalancer_models is None:\n load_balancer_models = {}\n load_balancer_models[\"ManagedClusterLoadBalancerProfile\"] = self.ManagedClusterLoadBalancerProfile\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileManagedOutboundIPs\"\n ] = self.ManagedClusterLoadBalancerProfileManagedOutboundIPs\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileOutboundIPs\"\n ] = self.ManagedClusterLoadBalancerProfileOutboundIPs\n load_balancer_models[\n \"ManagedClusterLoadBalancerProfileOutboundIPPrefixes\"\n ] = self.ManagedClusterLoadBalancerProfileOutboundIPPrefixes\n load_balancer_models[\"ResourceReference\"] = self.ResourceReference\n self.__loadbalancer_models = SimpleNamespace(**load_balancer_models)\n return self.__loadbalancer_models",
"def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models",
"def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")",
"def get():\n\n return {'model_ids': mgmt.get_model_ids()}",
"def get_models():\n param = {'C': 0.7678243129497218, 'penalty': 'l2'}\n model1 = LogisticRegression(**param)\n\n param = {'n_neighbors': 8}\n model2 = KNeighborsClassifier(**param)\n\n param = {'C': 1.7, 'kernel': 'linear', 'probability':True}\n model3 = SVC(**param)\n\n param = {'criterion': 'gini', 'max_depth': 3, 'max_features': 2, 'min_samples_leaf': 3}\n model4 = DecisionTreeClassifier(**param)\n\n param = {'learning_rate': 0.05, 'n_estimators': 150}\n model5 = AdaBoostClassifier(**param)\n\n param = {'learning_rate': 0.01, 'n_estimators': 100}\n model6 = GradientBoostingClassifier(**param)\n\n model7 = RandomForestClassifier()\n\n model8 = ExtraTreesClassifier()\n\n models = {'LR':model1, 'KNN':model2, 'SVC':model3, 'DT':model4,\n 'ADa':model5, 'GB':model6, 'RF':model7, 'ET':model8\n }\n return models",
"def models(self) -> t.List[Model]:\n _models: t.List[Model] = [\n item for item in self._deployables if isinstance(item, Model)\n ]\n return _models",
"def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models",
"def models(self):\r\n return self.get_field('model')",
"def models(self):\r\n return self.get_field('model')",
"def simple_models(self, uuid=None):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n\n if uuid is None:\n r = requests.get(self.url + '/model', headers=headers)\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)\n else:\n r = requests.get(self.url + '/model/' + uuid, headers=headers)\n if r.status_code == 200:\n return self.build_simple_model(json.loads(r.content))\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def index(self, req):\n return self._get_models(req, is_detail=False)",
"def get_default_models():\n\n print (\"\")\n log_app.debug(\"create_new_config\")\n log_app.debug(\"create_new_config / method : %s\", request.method )\n \n only_default = request.args.get('only_default', default=False, type=bool)\n\n models = None \n\n ### check if uuid is new and not already used\n globalColl = mongoConfigColls['global']\n\n if only_default : \n query = {'is_default' : True, 'can_be_used_as_model' : True }\n else :\n query = {'can_be_used_as_model' : True }\n\n results = list(globalColl.find(query, {'_id': 0 }))\n\n tempList = []\n if results : \n for doc in results : \n # trim fields\n model = get_config_model( doc['apiviz_front_uuid'], returnDict=True, noRemap=False)\n tempList.append(model)\n models = tempList\n\n # log_app.debug(\"create_new_config / models : \\n%s\", pformat(models) )\n\n return jsonify({\n 'msg' : 'here comes the models defined as default and authorized to be copied',\n 'models': models,\n })",
"def models():\n # Do not include SingleAttacker as an available model, just get users to pass\n # an attacker model directly instead.\n return [cls for cls in AttackerConfiguration.__subclasses__() if cls != SingleAttacker] # pylint: disable=no-member",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def availablemodels(self):\n return self.__models.keys()",
"def maintenance_policies(self) -> Sequence['outputs.GetKubernetesClusterMaintenancePolicyResult']:\n return pulumi.get(self, \"maintenance_policies\")",
"def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list",
"def getmodels(self, n):\n r = ConfigurableManager.getmodels(self, n)\n # EMANE global params are stored with first EMANE node (if non-default\n # values are configured)\n sorted_ids = sorted(self.configs.keys())\n if None in self.configs and len(sorted_ids) > 1 and \\\n n.objid == sorted_ids[1]:\n v = self.configs[None]\n for model in v:\n cls = self._modelclsmap[model[0]]\n vals = model[1]\n r.append((cls, vals))\n return r",
"def get_related_models(self, model):\n return self._invalidation_model_store.get(model, {})",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]"
] |
[
"0.6285593",
"0.57894367",
"0.5700239",
"0.5656415",
"0.56433",
"0.5520452",
"0.55160034",
"0.5408721",
"0.5361932",
"0.5250989",
"0.5221271",
"0.5219817",
"0.5212507",
"0.5210442",
"0.51967865",
"0.51966053",
"0.51966053",
"0.5143466",
"0.5135863",
"0.5131256",
"0.5119935",
"0.51094604",
"0.50789076",
"0.50738055",
"0.49816358",
"0.49806648",
"0.49750504",
"0.49433413",
"0.49184334",
"0.49110973"
] |
0.7894215
|
0
|
Get the existing ManagedCluster object in update mode.
|
def existing_mc(self) -> ManagedCluster:
return self.__existing_mc
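
The private name-mangled field behind this getter is populated exactly once via attach_existing_mc (which appears among the negatives further below); a self-contained sketch of that write-once, read-only pairing, with a plain dict standing in for ManagedCluster and RuntimeError for azure-cli's internal error type:

class UpdateContext:
    def __init__(self):
        self.__existing_mc = None

    @property
    def existing_mc(self):
        # Read-only view of the snapshot fetched from the service.
        return self.__existing_mc

    def attach_existing_mc(self, mc):
        # Write-once: a second attach signals a logic error upstream.
        if self.__existing_mc is not None:
            raise RuntimeError("existing `mc` already attached")
        self.__existing_mc = mc

ctx = UpdateContext()
ctx.attach_existing_mc({"name": "mycluster"})  # any object stands in for ManagedCluster
print(ctx.existing_mc["name"])  # mycluster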
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def cluster(self):\n return self._cluster",
"def cluster(self):\n return self._cluster",
"def version_cluster(self):\n response = self._request_call('/version')\n return response.version_etcdcluster",
"def update(id, body: Body):\n\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n cluster.update(body.dict())\n cluster = clusters.update(cluster)\n\n return cluster.export()",
"def management_cluster(self) -> pulumi.Output['outputs.PrivateCloudManagementCluster']:\n return pulumi.get(self, \"management_cluster\")",
"def poll_cluster(self, server, obj, name):\n\n return self._poll_group('cluster', server, obj, name)",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]",
"def load(name):\n\n clovr = pymongo.Connection().clovr\n clusters = clovr.clusters\n instances = clovr.instances\n \n cluster = clusters.find_one(dict(name=name))\n if not cluster:\n raise ClusterDoesNotExist(name)\n\n\n return cluster",
"def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")",
"def _get_cluster_list(self):\n return self.__cluster_list",
"def get(self, request, cluster_id): # pylint: disable=arguments-differ\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n obj = cm.api.get_upgrade(cluster, self.get_ordering(request, self.queryset, self))\n serializer = self.serializer_class(obj, many=True, context={\n 'cluster_id': cluster.id, 'request': request\n })\n return Response(serializer.data)",
"def get_cluster(self) -> 'AioCluster':\n return AioCluster(self)",
"def get_cluster(self, label):\n try:\n return self._clusters[label]\n except KeyError:\n return None",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def test_refresh_cluster(self):\n # Save original cluster wrapper for later comparison\n orig_clust_wrap = pvm_clust.Cluster.wrap(self.clust_resp)\n # Prime _clust_wrap\n ssp_stor = self._get_ssp_stor()\n # Verify baseline call counts\n self.assertEqual(1, self.mock_search.call_count)\n self.assertEqual(0, self.mock_clust_refresh.call_count)\n clust_wrap = ssp_stor._refresh_cluster()\n # This should call refresh\n self.assertEqual(1, self.mock_search.call_count)\n self.assertEqual(0, self.apt.read.call_count)\n self.assertEqual(1, self.mock_clust_refresh.call_count)\n self.assertEqual(clust_wrap.name, orig_clust_wrap.name)",
"def update_coe_cluster(self, name_or_id, **kwargs):\n self.list_coe_clusters.invalidate(self)\n cluster = self.get_coe_cluster(name_or_id)\n if not cluster:\n raise exc.OpenStackCloudException(\n \"COE cluster %s not found.\" % name_or_id\n )\n\n cluster = self.container_infrastructure_management.update_cluster(\n cluster, **kwargs\n )\n\n return cluster",
"def cluster_info(self) -> ClusterInfoResult:\n if not self.connected:\n raise RuntimeError(\n \"Cluster is not connected, cannot get cluster info.\")\n cluster_info = None\n cluster_info = self._get_cluster_info()\n self._cluster_info = cluster_info\n return cluster_info",
"def get(self, request, cluster_id, upgrade_id): # pylint: disable=arguments-differ\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n obj = self.get_queryset().get(id=upgrade_id)\n serializer = self.serializer_class(obj, context={\n 'cluster_id': cluster.id, 'request': request\n })\n return Response(serializer.data)",
"def management_cluster(self) -> Optional[pulumi.Input['PrivateCloudManagementClusterArgs']]:\n return pulumi.get(self, \"management_cluster\")",
"def get_cluster(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get cluster returned error code {response.status_code}\")\n return None\n return response.json()",
"def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data",
"def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))",
"def add_new_cluster(self):\n self.result.append(Cluster.Cluster())\n return len(self.result) - 1",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"cluster_id\"] = cluster_id\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"size_gb\"] = size_gb\n __props__.__dict__[\"tags\"] = tags\n return Cluster(resource_name, opts=opts, __props__=__props__)",
"def update_mc(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n return self.put_mc(mc)",
"def gke_cluster(self) -> Optional['outputs.MembershipEndpointGkeCluster']:\n return pulumi.get(self, \"gke_cluster\")",
"def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description"
] |
[
"0.6477688",
"0.6216452",
"0.6216452",
"0.6137707",
"0.60626763",
"0.5750455",
"0.5741859",
"0.57252145",
"0.5679924",
"0.5643245",
"0.5634165",
"0.5621059",
"0.56092423",
"0.5558072",
"0.5544139",
"0.55095327",
"0.55016065",
"0.5501055",
"0.54964584",
"0.54956895",
"0.5488242",
"0.5479565",
"0.5450501",
"0.5443073",
"0.5438425",
"0.54357624",
"0.5429794",
"0.54165894",
"0.5408478",
"0.54031885"
] |
0.6553012
|
0
|
Attach the ManagedCluster object to the context. The `mc` object is only allowed to be attached once, and attaching again will raise a CLIInternalError.
|
def attach_mc(self, mc: ManagedCluster) -> None:
if self.decorator_mode == DecoratorMode.UPDATE:
self.attach_existing_mc(mc)
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
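
A runnable reduction of the guard above: even re-attaching the same object raises, and the error message only reports whether the conflicting object is the same or a different one. RuntimeError stands in for azure-cli's CLIInternalError, and the update-mode call to attach_existing_mc is omitted.

class Context:
    def __init__(self):
        self.mc = None

    def attach_mc(self, mc):
        if self.mc is None:
            self.mc = mc
        else:
            # Report whether the second attach carried the same object
            # or a different one, mirroring the message above.
            msg = "the same" if self.mc == mc else "different"
            raise RuntimeError(
                "Attempting to attach the `mc` object again, the two objects are {}.".format(msg)
            )

ctx = Context()
ctx.attach_mc("cluster-a")
try:
    ctx.attach_mc("cluster-b")
except RuntimeError as err:
    print(err)  # ... the two objects are different.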
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def attach_existing_mc(self, mc: ManagedCluster) -> None:\n if self.__existing_mc is None:\n self.__existing_mc = mc\n else:\n msg = \"the same\" if self.__existing_mc == mc else \"different\"\n raise CLIInternalError(\n \"Attempting to attach the existing `mc` object again, the two objects are {}.\".format(\n msg\n )\n )",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def process_attach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n attach_acr = self.context.get_attach_acr()\n if attach_acr:\n # If enable_managed_identity, attach acr operation will be handled after the cluster is created\n if not self.context.get_enable_managed_identity():\n service_principal_profile = mc.service_principal_profile\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=service_principal_profile.client_id,\n acr_name_or_id=attach_acr,\n # not actually used\n subscription_id=self.context.get_subscription_id(),\n )",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )",
"def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def create_mc(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Due to SPN replication latency, we do a few retries here\n max_retry = 30\n error_msg = \"\"\n for _ in range(0, max_retry):\n try:\n cluster = self.put_mc(mc)\n return cluster\n # CloudError was raised before, but since the adoption of track 2 SDK,\n # HttpResponseError would be raised instead\n except (CloudError, HttpResponseError) as ex:\n error_msg = str(ex)\n if \"not found in Active Directory tenant\" in ex.message:\n time.sleep(3)\n else:\n raise map_azure_error_to_cli_error(ex)\n raise AzCLIError(\"Maximum number of retries exceeded. \" + error_msg)",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def _ensure_mc(self, mc: ManagedCluster) -> None:\n if not isinstance(mc, self.models.ManagedCluster):\n raise CLIInternalError(\n \"Unexpected mc object with type '{}'.\".format(type(mc))\n )\n\n if self.context.mc != mc:\n raise CLIInternalError(\n \"Inconsistent state detected. The incoming `mc` \"\n \"is not the same as the `mc` in the context.\"\n )",
"def _ensure_mc(self, mc: ManagedCluster) -> None:\n if not isinstance(mc, self.models.ManagedCluster):\n raise CLIInternalError(\n \"Unexpected mc object with type '{}'.\".format(type(mc))\n )\n\n if self.context.mc != mc:\n raise CLIInternalError(\n \"Inconsistent state detected. The incoming `mc` is not the same as the `mc` in the context.\"\n )",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def process_attach_detach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n subscription_id = self.context.get_subscription_id()\n assignee, is_service_principal = self.context.get_assignee_from_identity_or_sp_profile()\n attach_acr = self.context.get_attach_acr()\n detach_acr = self.context.get_detach_acr()\n\n if attach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=attach_acr,\n subscription_id=subscription_id,\n is_service_principal=is_service_principal,\n )\n\n if detach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=detach_acr,\n subscription_id=subscription_id,\n detach=True,\n is_service_principal=is_service_principal,\n )",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_tags(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n tags = self.context.get_tags()\n if tags is not None:\n mc.tags = tags\n return mc",
"def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n aad_profile = None\n enable_aad = self.context.get_enable_aad()\n if enable_aad:\n aad_profile = self.models.ManagedClusterAADProfile(\n managed=True,\n enable_azure_rbac=self.context.get_enable_azure_rbac(),\n # ids -> i_ds due to track 2 naming issue\n admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),\n tenant_id=self.context.get_aad_tenant_id()\n )\n else:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = (\n self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()\n )\n aad_tenant_id = self.context.get_aad_tenant_id()\n if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):\n aad_profile = self.models.ManagedClusterAADProfile(\n client_app_id=aad_client_app_id,\n server_app_id=aad_server_app_id,\n server_app_secret=aad_server_app_secret,\n tenant_id=aad_tenant_id\n )\n mc.aad_profile = aad_profile\n return mc",
"def update_mc(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n return self.put_mc(mc)",
"def set_up_mc_properties(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.tags = self.context.get_tags()\n mc.kubernetes_version = self.context.get_kubernetes_version()\n mc.dns_prefix = self.context.get_dns_name_prefix()\n mc.disk_encryption_set_id = self.context.get_node_osdisk_diskencryptionset_id()\n mc.disable_local_accounts = self.context.get_disable_local_accounts()\n mc.enable_rbac = not self.context.get_disable_rbac()\n return mc",
"def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n edge_zone = self.context.get_edge_zone()\n if edge_zone:\n mc.extended_location = self.models.ExtendedLocation(\n name=edge_zone,\n type=self.models.ExtendedLocationTypes.EDGE_ZONE\n )\n return mc",
"def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if hasattr(self.models, \"ManagedClusterStorageProfile\"):\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def add_to_cluster_dictionary(self, cluster):\n\n self.clusters[self.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n\n return",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n # set intermediate\n self.context.set_intermediate(\"azuremonitormetrics_addon_enabled\", True, overwrite_exists=True)\n return mc",
"def set_up_node_resource_group(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.node_resource_group = self.context.get_node_resource_group()\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def set_up_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc"
] |
[
"0.7152228",
"0.6109371",
"0.6080918",
"0.59397835",
"0.58328784",
"0.5819823",
"0.5793942",
"0.5712632",
"0.57011956",
"0.5686649",
"0.56610256",
"0.55625063",
"0.55554956",
"0.55520815",
"0.5481644",
"0.5396167",
"0.5384683",
"0.5381763",
"0.5370491",
"0.53529763",
"0.53496224",
"0.5325599",
"0.53103364",
"0.5307809",
"0.5277482",
"0.52590215",
"0.5178703",
"0.5120756",
"0.5120387",
"0.51119375"
] |
0.75938123
|
0
|
Attach the existing ManagedCluster object to the context in update mode. The `mc` object is only allowed to be attached once, and attaching again will raise a CLIInternalError.
|
def attach_existing_mc(self, mc: ManagedCluster) -> None:
if self.__existing_mc is None:
self.__existing_mc = mc
else:
msg = "the same" if self.__existing_mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the existing `mc` object again, the two objects are {}.".format(
msg
)
)
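A minimal runnable sketch of the attach-once contract described above. Everything here is a hypothetical stand-in for illustration: CLIInternalError, ManagedCluster, and UpdateContext are simplified substitutes for the real azure-cli and SDK types, not the actual implementations.

class CLIInternalError(Exception):
    """Stand-in for the azure-cli internal error type."""


class ManagedCluster:
    """Stand-in for the SDK ManagedCluster model."""


class UpdateContext:
    # hypothetical context reproducing the attach-once rule above
    def __init__(self):
        self._existing_mc = None

    def attach_existing_mc(self, mc):
        if self._existing_mc is None:
            self._existing_mc = mc
        else:
            msg = "the same" if self._existing_mc == mc else "different"
            raise CLIInternalError(
                "Attempting to attach the existing `mc` object again, "
                "the two objects are {}.".format(msg)
            )


ctx = UpdateContext()
mc = ManagedCluster()
ctx.attach_existing_mc(mc)        # first attach succeeds
try:
    ctx.attach_existing_mc(mc)    # any second attach raises
except CLIInternalError as ex:
    print(ex)                     # "... the two objects are the same."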
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def attach_mc(self, mc: ManagedCluster) -> None:\n if self.decorator_mode == DecoratorMode.UPDATE:\n self.attach_existing_mc(mc)\n\n if self.mc is None:\n self.mc = mc\n else:\n msg = \"the same\" if self.mc == mc else \"different\"\n raise CLIInternalError(\n \"Attempting to attach the `mc` object again, the two objects are {}.\".format(\n msg\n )\n )",
"def update_mc(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n return self.put_mc(mc)",
"def update_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def update_tags(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n tags = self.context.get_tags()\n if tags is not None:\n mc.tags = tags\n return mc",
"def add_update_cluster(self, label, cluster):\n self._clusters[label] = cluster",
"def update_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if self.context.get_enable_aad():\n mc.aad_profile = self.models.ManagedClusterAADProfile(\n managed=True\n )\n\n aad_tenant_id = self.context.get_aad_tenant_id()\n aad_admin_group_object_ids = self.context.get_aad_admin_group_object_ids()\n enable_azure_rbac = self.context.get_enable_azure_rbac()\n disable_azure_rbac = self.context.get_disable_azure_rbac()\n if aad_tenant_id is not None:\n mc.aad_profile.tenant_id = aad_tenant_id\n if aad_admin_group_object_ids is not None:\n # ids -> i_ds due to track 2 naming issue\n mc.aad_profile.admin_group_object_i_ds = aad_admin_group_object_ids\n if enable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = True\n if disable_azure_rbac:\n mc.aad_profile.enable_azure_rbac = False\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif self.context.raw_param.get(\"enable_addons\") is not None:\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = self.context.get_enable_managed_identity()\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )\n\n # azure monitor metrics addon (v2)\n azuremonitormetrics_addon_enabled = self.context.get_intermediate(\n \"azuremonitormetrics_addon_enabled\",\n default_value=False\n )\n if azuremonitormetrics_addon_enabled:\n # Create the DC* objects, AMW, recording rules and grafana link here\n 
self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n True\n )",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def fetch_mc(self) -> ManagedCluster:\n mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def _ensure_mc(self, mc: ManagedCluster) -> None:\n if not isinstance(mc, self.models.ManagedCluster):\n raise CLIInternalError(\n \"Unexpected mc object with type '{}'.\".format(type(mc))\n )\n\n if self.context.mc != mc:\n raise CLIInternalError(\n \"Inconsistent state detected. The incoming `mc` \"\n \"is not the same as the `mc` in the context.\"\n )",
"def _ensure_mc(self, mc: ManagedCluster) -> None:\n if not isinstance(mc, self.models.ManagedCluster):\n raise CLIInternalError(\n \"Unexpected mc object with type '{}'.\".format(type(mc))\n )\n\n if self.context.mc != mc:\n raise CLIInternalError(\n \"Inconsistent state detected. The incoming `mc` is not the same as the `mc` in the context.\"\n )",
"def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc",
"def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None:\n # monitoring addon\n monitoring_addon_enabled = self.context.get_intermediate(\"monitoring_addon_enabled\", default_value=False)\n if monitoring_addon_enabled:\n enable_msi_auth_for_monitoring = self.context.get_enable_msi_auth_for_monitoring()\n if not enable_msi_auth_for_monitoring:\n # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM\n # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud\n cloud_name = self.cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurecloud\":\n from msrestazure.tools import resource_id\n\n cluster_resource_id = resource_id(\n subscription=self.context.get_subscription_id(),\n resource_group=self.context.get_resource_group_name(),\n namespace=\"Microsoft.ContainerService\",\n type=\"managedClusters\",\n name=self.context.get_name(),\n )\n self.context.external_functions.add_monitoring_role_assignment(\n cluster, cluster_resource_id, self.cmd\n )\n elif (\n self.context.raw_param.get(\"enable_addons\") is not None or\n self.context.raw_param.get(\"disable_addons\") is not None\n ):\n # Create the DCR Association here\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd,\n cluster.addon_profiles[CONST_MONITORING_ADDON_NAME],\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=False,\n create_dcra=True,\n enable_syslog=self.context.get_enable_syslog(),\n )\n\n # ingress appgw addon\n ingress_appgw_addon_enabled = self.context.get_intermediate(\"ingress_appgw_addon_enabled\", default_value=False)\n if ingress_appgw_addon_enabled:\n self.context.external_functions.add_ingress_appgw_addon_role_assignment(cluster, self.cmd)\n\n # virtual node addon\n virtual_node_addon_enabled = self.context.get_intermediate(\"virtual_node_addon_enabled\", default_value=False)\n if virtual_node_addon_enabled:\n self.context.external_functions.add_virtual_node_role_assignment(\n self.cmd, cluster, self.context.get_vnet_subnet_id()\n )\n\n # attach acr\n enable_managed_identity = check_is_msi_cluster(cluster)\n attach_acr = self.context.get_attach_acr()\n if enable_managed_identity and attach_acr:\n # Attach ACR to cluster enabled managed identity\n if cluster.identity_profile is None or cluster.identity_profile[\"kubeletidentity\"] is None:\n logger.warning(\n \"Your cluster is successfully created, but we failed to attach \"\n \"acr to it, you can manually grant permission to the identity \"\n \"named <ClUSTER_NAME>-agentpool in MC_ resource group to give \"\n \"it permission to pull from ACR.\"\n )\n else:\n kubelet_identity_object_id = cluster.identity_profile[\"kubeletidentity\"].object_id\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=kubelet_identity_object_id,\n acr_name_or_id=attach_acr,\n subscription_id=self.context.get_subscription_id(),\n is_service_principal=False,\n )",
"def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n )\n }\n user_assigned_identity = self.context.get_assign_identity()\n if not user_assigned_identity:\n user_assigned_identity = self.context.get_user_assignd_identity_from_mc()\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id(user_assigned_identity)\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.storage_profile = self.context.get_storage_profile()\n\n return mc",
"def create_mc(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # Due to SPN replication latency, we do a few retries here\n max_retry = 30\n error_msg = \"\"\n for _ in range(0, max_retry):\n try:\n cluster = self.put_mc(mc)\n return cluster\n # CloudError was raised before, but since the adoption of track 2 SDK,\n # HttpResponseError would be raised instead\n except (CloudError, HttpResponseError) as ex:\n error_msg = str(ex)\n if \"not found in Active Directory tenant\" in ex.message:\n time.sleep(3)\n else:\n raise map_azure_error_to_cli_error(ex)\n raise AzCLIError(\"Maximum number of retries exceeded. \" + error_msg)",
"def update_k8s_support_plan(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n support_plan = self.context.get_k8s_support_plan()\n if support_plan == KubernetesSupportPlan.AKS_LONG_TERM_SUPPORT:\n if mc is None or mc.sku is None or mc.sku.tier.lower() != CONST_MANAGED_CLUSTER_SKU_TIER_PREMIUM.lower():\n raise AzCLIError(\"Long term support is only available for premium tier clusters.\")\n\n mc.support_plan = support_plan\n return mc",
"def existing_mc(self) -> ManagedCluster:\n return self.__existing_mc",
"def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError",
"def set_up_mc_properties(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n mc.tags = self.context.get_tags()\n mc.kubernetes_version = self.context.get_kubernetes_version()\n mc.dns_prefix = self.context.get_dns_name_prefix()\n mc.disk_encryption_set_id = self.context.get_node_osdisk_diskencryptionset_id()\n mc.disable_local_accounts = self.context.get_disable_local_accounts()\n mc.enable_rbac = not self.context.get_disable_rbac()\n return mc",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"def update_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n auto_upgrade_channel = self.context.get_auto_upgrade_channel()\n if auto_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel\n\n node_os_upgrade_channel = self.context.get_node_os_upgrade_channel()\n if node_os_upgrade_channel is not None:\n if mc.auto_upgrade_profile is None:\n mc.auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile()\n mc.auto_upgrade_profile.node_os_upgrade_channel = node_os_upgrade_channel\n\n return mc",
"def process_attach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n attach_acr = self.context.get_attach_acr()\n if attach_acr:\n # If enable_managed_identity, attach acr operation will be handled after the cluster is created\n if not self.context.get_enable_managed_identity():\n service_principal_profile = mc.service_principal_profile\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=service_principal_profile.client_id,\n acr_name_or_id=attach_acr,\n # not actually used\n subscription_id=self.context.get_subscription_id(),\n )",
"def set_up_defender(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n defender = self.context.get_defender_config()\n if defender:\n if mc.security_profile is None:\n mc.security_profile = self.models.ManagedClusterSecurityProfile()\n\n mc.security_profile.defender = defender\n\n return mc",
"def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n edge_zone = self.context.get_edge_zone()\n if edge_zone:\n mc.extended_location = self.models.ExtendedLocation(\n name=edge_zone,\n type=self.models.ExtendedLocationTypes.EDGE_ZONE\n )\n return mc",
"def add_to_cluster_dictionary(self, cluster):\n\n self.clusters[self.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n\n return",
"def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n if mc.api_server_access_profile is None:\n profile_holder = self.models.ManagedClusterAPIServerAccessProfile()\n else:\n profile_holder = mc.api_server_access_profile\n\n api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()\n disable_public_fqdn = self.context.get_disable_public_fqdn()\n enable_public_fqdn = self.context.get_enable_public_fqdn()\n if api_server_authorized_ip_ranges is not None:\n # empty string is valid as it disables ip whitelisting\n profile_holder.authorized_ip_ranges = api_server_authorized_ip_ranges\n if disable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = False\n if enable_public_fqdn:\n profile_holder.enable_private_cluster_public_fqdn = True\n\n # keep api_server_access_profile empty if none of its properties are updated\n if (\n profile_holder != mc.api_server_access_profile and\n profile_holder == self.models.ManagedClusterAPIServerAccessProfile()\n ):\n profile_holder = None\n mc.api_server_access_profile = profile_holder\n return mc",
"def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)",
"def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n enable_ahub = self.context.get_enable_ahub()\n disable_ahub = self.context.get_disable_ahub()\n windows_admin_password = self.context.get_windows_admin_password()\n enable_windows_gmsa = self.context.get_enable_windows_gmsa()\n\n if any([enable_ahub, disable_ahub, windows_admin_password, enable_windows_gmsa]) and not mc.windows_profile:\n # seems we know the error\n raise UnknownError(\n \"Encounter an unexpected error while getting windows profile from the cluster in the process of update.\"\n )\n\n if enable_ahub:\n mc.windows_profile.license_type = 'Windows_Server'\n if disable_ahub:\n mc.windows_profile.license_type = 'None'\n if windows_admin_password:\n mc.windows_profile.admin_password = windows_admin_password\n if enable_windows_gmsa:\n gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name()\n mc.windows_profile.gmsa_profile = self.models.WindowsGmsaProfile(\n enabled=True,\n dns_server=gmsa_dns_server,\n root_domain_name=gmsa_root_domain_name,\n )\n return mc",
"def update_mc_profile_default(self) -> ManagedCluster:\n # check raw parameters\n # promt y/n if no options are specified to ask user whether to perform a reconcile operation\n self.check_raw_parameters()\n # fetch the ManagedCluster object\n mc = self.fetch_mc()\n # update agentpool profile by the agentpool decorator\n mc = self.update_agentpool_profile(mc)\n # update auto scaler profile\n mc = self.update_auto_scaler_profile(mc)\n # update tags\n mc = self.update_tags(mc)\n # attach or detach acr (add or delete role assignment for acr)\n self.process_attach_detach_acr(mc)\n # update sku (uptime sla)\n mc = self.update_sku(mc)\n # update outbound type\n mc = self.update_outbound_type_in_network_profile(mc)\n # update load balancer profile\n mc = self.update_load_balancer_profile(mc)\n # update nat gateway profile\n mc = self.update_nat_gateway_profile(mc)\n # update disable/enable local accounts\n mc = self.update_disable_local_accounts(mc)\n # update api server access profile\n mc = self.update_api_server_access_profile(mc)\n # update windows profile\n mc = self.update_windows_profile(mc)\n # update network plugin settings\n mc = self.update_network_plugin_settings(mc)\n # update aad profile\n mc = self.update_aad_profile(mc)\n # update oidc issuer profile\n mc = self.update_oidc_issuer_profile(mc)\n # update auto upgrade profile\n mc = self.update_auto_upgrade_profile(mc)\n # update identity\n mc = self.update_identity(mc)\n # update addon profiles\n mc = self.update_addon_profiles(mc)\n # update defender\n mc = self.update_defender(mc)\n # update workload identity profile\n mc = self.update_workload_identity_profile(mc)\n # update stroage profile\n mc = self.update_storage_profile(mc)\n # update azure keyvalut kms\n mc = self.update_azure_keyvault_kms(mc)\n # update image cleaner\n mc = self.update_image_cleaner(mc)\n # update identity\n mc = self.update_identity_profile(mc)\n # set up http proxy config\n mc = self.update_http_proxy_config(mc)\n # update workload autoscaler profile\n mc = self.update_workload_auto_scaler_profile(mc)\n # update kubernetes support plan\n mc = self.update_k8s_support_plan(mc)\n # update azure monitor metrics profile\n mc = self.update_azure_monitor_profile(mc)\n return mc"
] |
[
"0.7666705",
"0.64610124",
"0.61480016",
"0.5906628",
"0.58978236",
"0.5808993",
"0.57912594",
"0.57744676",
"0.57592237",
"0.57166994",
"0.57078767",
"0.5704928",
"0.5699452",
"0.56803733",
"0.5608162",
"0.5579427",
"0.55647975",
"0.54819435",
"0.5457632",
"0.54316694",
"0.54044145",
"0.5381602",
"0.5375234",
"0.5335805",
"0.5325051",
"0.5309743",
"0.529873",
"0.5256039",
"0.524363",
"0.52407444"
] |
0.7262131
|
1
|
Attach the AKSAgentPoolContext object to the context. The `agentpool_context` object is only allowed to be attached once, and attaching again will raise a CLIInternalError.
|
def attach_agentpool_context(self, agentpool_context: AKSAgentPoolContext) -> None:
if self.agentpool_context is None:
self.agentpool_context = agentpool_context
else:
msg = "the same" if self.agentpool_context == agentpool_context else "different"
raise CLIInternalError(
"Attempting to attach the `agentpool_context` object again, the two objects are {}.".format(
msg
)
)
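The same attach-once pattern applies here; the first negative below (init_agentpool_decorator_context) shows how the managed-cluster decorator builds an agentpool decorator and attaches its context exactly once. A short runnable sketch, using hypothetical stand-ins for AKSAgentPoolContext and CLIInternalError:

class CLIInternalError(Exception):
    """Stand-in for the azure-cli internal error type."""


class AKSAgentPoolContext:
    """Stand-in for the real agentpool context."""


class McContext:
    # hypothetical holder reproducing the attach-once rule above
    def __init__(self):
        self.agentpool_context = None

    def attach_agentpool_context(self, agentpool_context):
        if self.agentpool_context is None:
            self.agentpool_context = agentpool_context
        else:
            raise CLIInternalError(
                "Attempting to attach the `agentpool_context` object again."
            )


ctx = McContext()
ctx.attach_agentpool_context(AKSAgentPoolContext())   # first attach: stored
try:
    ctx.attach_agentpool_context(AKSAgentPoolContext())
except CLIInternalError as ex:
    print(ex)  # raised on the second attach, as described above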
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def init_agentpool_decorator_context(self) -> None:\n self.agentpool_decorator = AKSAgentPoolAddDecorator(\n self.cmd, self.client, self.__raw_parameters, self.resource_type, self.agentpool_decorator_mode\n )\n self.agentpool_context = self.agentpool_decorator.context\n self.context.attach_agentpool_context(self.agentpool_context)",
"def init_agentpool_decorator_context(self) -> None:\n self.agentpool_decorator = AKSAgentPoolUpdateDecorator(\n self.cmd, self.client, self.__raw_parameters, self.resource_type, self.agentpool_decorator_mode\n )\n self.agentpool_context = self.agentpool_decorator.context\n self.context.attach_agentpool_context(self.agentpool_context)",
"def attach_proxy(self, proxy):\n if self.current_proxy:\n raise ValueError(\"A proxy is already attached!\")\n logger.info(\"Attaching proxy for context: {}\".format(proxy.context_alias))\n self.current_proxy = proxy",
"def add(self, context):\n self._contexts.add(context)",
"def add(self, context, action, reward):\n\n if self.intercept:\n c = np.array(context[:])\n c = np.append(c, 1.0).reshape((1, self.context_dim + 1))\n else:\n c = np.array(context[:]).reshape((1, self.context_dim))\n\n if self.contexts is None:\n self.contexts = c\n else:\n self.contexts = np.vstack((self.contexts, c))\n\n r = np.zeros((1, self.num_actions))\n r[0, action] = reward\n if self.rewards is None:\n self.rewards = r\n else:\n self.rewards = np.vstack((self.rewards, r))\n\n self.actions.append(action)",
"def __enter__(self):\n QueuingContext._active_contexts.append(self)\n\n return self",
"def add_context(self, objmap, ctx):\n zing_state = self._get_zing_tx_state()\n zing_state.datamaps_contexts[objmap] = ObjectMapContext(ctx)",
"def add_instance(self, context):\r\n self.instance_contexts.append(context)\r\n self.total_count += 1",
"def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()",
"def set_up_agentpool_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n agentpool_profile = self.agentpool_decorator.construct_agentpool_profile_default()\n mc.agent_pool_profiles = [agentpool_profile]\n return mc",
"def push_context(self):\n raise NotImplementedError()",
"def attach(self, agent, period, name=None):\n if isinstance(agent, Agent):\n agent = agent.clone(name=name, period=period)\n else:\n agent = Agent(name, agent, period=period)\n self.agents.append(agent)\n return self",
"def add_context(self, address: int) -> None:\n\n if address not in self.all_contexts:\n self.current_contexts.append(address)\n self.all_contexts.append(address)",
"def attach(self, bo):\n self.bo = bo\n for name, feature in self._d_features.items():\n feature.attach(bo)",
"def assign_opengl_context(self, context):\n self.assigned_opengl_context = context",
"def _context_new(self):\n assert self._pa_mainloop is not None\n app_name = self._get_app_name()\n context = pa.pa_context_new(self._pa_mainloop,\n app_name.encode('ASCII')\n )\n return context",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def push_context(self, ctx):\n self._tpl_context = ctx",
"def push_subcontext(self, context):\n self._active_contexts.append(context)\n if context not in self.subcontexts:\n self.subcontexts.append(context)",
"def cmd_context(self, cmd_context):\n\n self._cmd_context = cmd_context",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.UPDATE\n )",
"async def __aenter__(self) -> 'NodePool':\n\n return await self.open()",
"def create_pool(self, context, pool):\n LOG.info(\"Received request 'Create Pool' for Pool:%(pool_id)s \",\n {'pool_id': pool['id']})\n arg_dict = {'context': context,\n lb_const.POOL: pool\n }\n # REVISIT(jiahao) M:N pool is not yet implemented.\n self._send_event(lb_const.EVENT_CREATE_POOL_V2, arg_dict,\n serialize=True,\n binding_key=pool['loadbalancer_id'],\n key=pool['id'])",
"def switch_context(self, context):\r\n self.context_stack.append(self.current_context)\r\n self.current_context = context",
"def merge_context(self, context):\n context.update(self._context)\n self._context = context",
"def PushContext (cls, ctx):\n assert isinstance(ctx, cls)\n cls.__ContextStack.append(ctx)\n return ctx",
"def activate(cls, ctx):\r\n if hasattr(ctx, '_on_context_exit'):\r\n raise cls.ContextError('Context actions registered outside this parse context arg active')\r\n\r\n try:\r\n cls._active.append(ctx)\r\n ctx._on_context_exit = []\r\n yield\r\n finally:\r\n for func, args, kwargs in ctx._on_context_exit:\r\n func(*args, **kwargs)\r\n del ctx._on_context_exit\r\n cls._active.pop()",
"def context(self, context):\n self._context = context",
"def context(self, context):\n\n self._context = context",
"def _bind_device_context():\n _bind_device_ctx()"
] |
[
"0.72640747",
"0.69137096",
"0.54524636",
"0.5199811",
"0.5116517",
"0.51051664",
"0.50245696",
"0.5019702",
"0.49775326",
"0.48670903",
"0.48327",
"0.47892487",
"0.47868285",
"0.47780704",
"0.4746131",
"0.46875876",
"0.4678888",
"0.4650176",
"0.46418124",
"0.45968106",
"0.45772907",
"0.45625004",
"0.455172",
"0.45337868",
"0.45336866",
"0.450982",
"0.45077324",
"0.44706252",
"0.44645905",
"0.44601345"
] |
0.8392019
|
0
|
Helper function to parse and verify cluster_autoscaler_profile. If the user input is a list, parse it with the function "extract_comma_separated_string". If the type of the user input or of the parsed value is not a dictionary, raise an InvalidArgumentValueError. Otherwise, use the keys from the attribute map of ManagedClusterPropertiesAutoScalerProfile to verify that the keys in the key-value pairs provided by the user are valid; if not, raise an InvalidArgumentValueError.
|
def __validate_cluster_autoscaler_profile(
self, cluster_autoscaler_profile: Union[List, Dict, None]
) -> Union[Dict, None]:
if cluster_autoscaler_profile is not None:
# convert list to dict
if isinstance(cluster_autoscaler_profile, list):
params_dict = {}
for item in cluster_autoscaler_profile:
params_dict.update(
extract_comma_separated_string(
item,
extract_kv=True,
allow_empty_value=True,
default_value={},
)
)
cluster_autoscaler_profile = params_dict
# check if the type is dict
if not isinstance(cluster_autoscaler_profile, dict):
raise InvalidArgumentValueError(
"Unexpected input cluster-autoscaler-profile, value: '{}', type '{}'.".format(
cluster_autoscaler_profile,
type(cluster_autoscaler_profile),
)
)
# verify keys
# pylint: disable=protected-access
valid_keys = list(
k.replace("_", "-") for k in self.models.ManagedClusterPropertiesAutoScalerProfile._attribute_map.keys()
)
for key in cluster_autoscaler_profile.keys():
if not key:
raise InvalidArgumentValueError("Empty key specified for cluster-autoscaler-profile")
if key not in valid_keys:
raise InvalidArgumentValueError(
"'{}' is an invalid key for cluster-autoscaler-profile. Valid keys are {}.".format(
key, ", ".join(valid_keys)
)
)
return cluster_autoscaler_profile
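A worked, standalone emulation of the key-verification step above. The key list is a hypothetical subset: the real code derives valid_keys from ManagedClusterPropertiesAutoScalerProfile._attribute_map and raises InvalidArgumentValueError rather than the ValueError used here.

# snake_case attribute-map keys become dashed CLI keys, as in the validator above
valid_keys = [k.replace("_", "-") for k in ("scan_interval", "expander", "max_empty_bulk_delete")]

user_profile = {"scan-interval": "30s", "expander": "least-waste"}
for key in user_profile:
    if not key:
        raise ValueError("Empty key specified for cluster-autoscaler-profile")
    if key not in valid_keys:
        raise ValueError(
            "'{}' is an invalid key for cluster-autoscaler-profile. Valid keys are {}.".format(
                key, ", ".join(valid_keys)
            )
        )
print(user_profile)  # both keys are valid, so validation passes unchanged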
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_cluster_autoscaler_profile(self, read_only: bool = False) -> Union[Dict[str, str], None]:\n # read the original value passed by the command\n cluster_autoscaler_profile = self.raw_param.get(\"cluster_autoscaler_profile\")\n # parse and validate user input\n cluster_autoscaler_profile = self.__validate_cluster_autoscaler_profile(cluster_autoscaler_profile)\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.auto_scaler_profile is not None:\n cluster_autoscaler_profile = self.mc.auto_scaler_profile\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return cluster_autoscaler_profile\n\n # dynamic completion for update mode only\n if not read_only and self.decorator_mode == DecoratorMode.UPDATE:\n if cluster_autoscaler_profile and self.mc and self.mc.auto_scaler_profile:\n # shallow copy should be enough for string-to-string dictionary\n copy_of_raw_dict = self.mc.auto_scaler_profile.__dict__.copy()\n new_options_dict = dict(\n (key.replace(\"-\", \"_\"), value)\n for (key, value) in cluster_autoscaler_profile.items()\n )\n copy_of_raw_dict.update(new_options_dict)\n cluster_autoscaler_profile = copy_of_raw_dict\n\n # this parameter does not need validation\n return cluster_autoscaler_profile",
"def _check_settings_validity(self, settings: list):\n\n if isinstance(settings, list):\n # if list is empty\n if not settings:\n raise ValueError('The given settings are an empty list, please make sure to add a dictionary with a key \\'CLF_NAME\\' and a corresponding classfier name as value. You can specify hyperparameters for the classifier with the key \\'HYPERPARAMS\\'.')\n \n # if not all entries in the list are of type dict raise an error\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in settings are expected to be of type \\'dict\\'.')\n\n for setting in settings:\n # if there is no CLF_NAME key in the dict of the setting entry raise an error\n if 'CLF_NAME' not in setting.keys():\n raise KeyError(f'Every entry in settings is required to have a \\'CLF_NAME\\' key, please make sure that this key exists in every entry in settings.')\n \n # get the classifier and its corresponding parameters\n classifier = self._get_classifier_to_name(setting['CLF_NAME'])\n\n # check if the classifier also has a predict_proba() function\n if not(hasattr(classifier,'predict_proba') and callable(getattr(classifier,'predict_proba'))):\n raise ValueError('')\n \n clf_params_keys = classifier.get_params().keys()\n\n # check if hyperparameters are given as list or as dict\n if 'HYPERPARAMS' in setting.keys():\n hyperparams = setting['HYPERPARAMS']\n\n # if given as list, all elements in the list must be of type dict\n if isinstance(hyperparams, list):\n # if hyperparameter list is empty\n if not hyperparams:\n raise ValueError('The given hyperparameters are an empty list, please make sure to add hyperparameters as \\'dict\\' where a key represents the parameter name and the value is the parameter value/values wrapped in a list.')\n\n if not all(isinstance(s, dict) for s in settings):\n raise TypeError(f'Elements in the settings hyperparameters are expected to be of type \\'dict\\'.')\n \n # loop through the dicts in HYPERPARAMS\n for hyperparams_entry in hyperparams:\n # for each dict check if the keys are valid paramters of the corresponding classifier\n for hyperparams_entry_key in hyperparams_entry.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_entry_value = hyperparams_entry[hyperparams_entry_key]\n \n if not isinstance(hyperparams_entry_value, list):\n raise TypeError(f'The hyperparameter {hyperparams_entry_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_entry_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n\n # if the parameter value list is empty\n if not hyperparams_entry_value:\n raise ValueError(f'Valuelist for hyperparameter {hyperparams_entry_key} is empty. 
Please specify values for the hyperparameter {hyperparams_entry_key} or remove it from HYPERPARAMS.')\n\n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparams_entry_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparams_entry_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n \n # if given as dict just check if the keys are valid paramters of the corresponding classifier\n elif isinstance(hyperparams, dict):\n for hyperparam_key in hyperparams.keys():\n # check if the value to the key is a list otherwise raise an error:\n hyperparams_value = hyperparams[hyperparam_key]\n\n if not isinstance(hyperparams_value, list):\n raise TypeError(f'The hyperparameter {hyperparam_key} in the {classifier.__class__.__name__} settings must be of type \\'list\\', got type \\'{type(hyperparams_value).__name__}\\', make sure that every specified hyperparameter is wrapped in a list.')\n \n # if the key is not in the parameters specified by sklearn raise an error\n if not hyperparam_key in clf_params_keys:\n raise NameError(f'The specified hyperparameter {hyperparam_key} is not a supported paramter of {classifier.__class__.__name__}, make sure to only use supported parameters (see the sklearn documentation of {classifier.__class__.__name__} for a list of valid parameters).')\n\n else:\n raise TypeError(f'Hyperparameters in settings must be either of type \\'dict\\' or \\'list\\', got type \\'{type(hyperparams).__name__}\\'')\n\n else:\n raise TypeError(f'Settings must be of type \\'list\\', passed settings are of type \\'{type(settings).__name__}\\'')",
"def check_args(args):\n map_args = {}\n\n if args['frequencies'] is None:\n return None\n\n if args['instance_type'] is None:\n return None\n\n if args['name'] is None:\n return None\n\n instance_details = AWS_INSTANCES.get(args['instance_type'])\n if instance_details is None:\n LOGGER.error('The instance type {0} is not supported.'.format(args['instance_type']))\n return None\n else:\n LOGGER.info(\n 'instance: {0}, vCPU: {1}, RAM: {2}GB, Disks: {3}x{4}GB, IOPS: {5}'.format(\n args['instance_type'],\n instance_details.vCPU,\n instance_details.memory,\n instance_details.number_disks,\n instance_details.size,\n instance_details.iops_support))\n\n map_args.update({\n 'ami_id': args['ami_id'] if args['ami_id'] is not None else AWS_AMI_ID,\n 'created_by': args['created_by'] if args['created_by'] is not None else getpass.getuser(),\n 'spot_price': args['spot_price'] if args['spot_price'] is not None else None,\n 'user_data': get_script(args['bash_script'] if args['bash_script'] is not None else BASH_SCRIPT_CLEAN_02),\n 'setup_disks': get_script(BASH_SCRIPT_SETUP_DISKS),\n 'instance_details': instance_details,\n })\n return map_args",
"def ValidateAutoscalingMetricSpecs(specs):\n if specs is None:\n return\n\n for key, value in specs.items():\n if key not in constants.OP_AUTOSCALING_METRIC_NAME_MAPPER:\n raise exceptions.InvalidArgumentException(\n '--autoscaling-metric-specs',\n \"\"\"Autoscaling metric name can only be one of the following: {}.\"\"\"\n .format(', '.join([\n \"'{}'\".format(c) for c in sorted(\n constants.OP_AUTOSCALING_METRIC_NAME_MAPPER.keys())\n ])))\n\n if value <= 0 or value > 100:\n raise exceptions.InvalidArgumentException(\n '--autoscaling-metric-specs',\n 'Metric target value %s is not between 0 and 100.' % value)",
"def _validate_container_properties(container_properties, prefix=None):\n if not prefix:\n prefix = 'container_properties'\n\n container_config = [\n {\n 'field_name': 'image',\n 'field_value': container_properties.get('image'),\n 'prefix': prefix,\n 'required_type': str,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'vcpus',\n 'field_value': container_properties.get('vcpus'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'memory',\n 'field_value': container_properties.get('memory'),\n 'prefix': prefix,\n 'required_type': int,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'command',\n 'field_value': container_properties.get('command'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'job_role_arn',\n 'field_value': container_properties.get('job_role_arn'),\n 'prefix': prefix,\n 'required_type': str,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'execution_role_arn',\n 'field_value': container_properties.get('execution_role_arn'),\n 'prefix': prefix,\n 'required_type': str,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'volumes',\n 'field_value': container_properties.get('volumes'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'volumes',\n 'field_value': container_properties.get('volumes'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'environment',\n 'field_value': container_properties.get('environment'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'mount_points',\n 'field_value': container_properties.get('mount_points'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'readonly_root_filesystem',\n 'field_value': container_properties.get('readonly_root_filesystem'),\n 'prefix': prefix,\n 'required_type': bool,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'privileged',\n 'field_value': container_properties.get('privileged'),\n 'prefix': prefix,\n 'required_type': bool,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'ulimits',\n 'field_value': container_properties.get('ulimits'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'user',\n 'field_value': container_properties.get('user'),\n 'prefix': prefix,\n 'required_type': str,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'instance_type',\n 'field_value': container_properties.get('instance_type'),\n 'prefix': prefix,\n 'required_type': str,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'resource_requirements',\n 'field_value': container_properties.get('resource_requirements'),\n 'prefix': prefix,\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'linux_parameters',\n 'field_value': container_properties.get('linux_parameters'),\n 'prefix': prefix,\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'log_configuration',\n 'field_value': container_properties.get('log_configuration'),\n 'prefix': prefix,\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 
'secrets',\n 'field_value': container_properties.get('secrets'),\n 'prefix': prefix,\n 'required_type': list,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'network_configuration',\n 'field_value': container_properties.get('network_configuration'),\n 'prefix': prefix,\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n {\n 'field_name': 'fargate_platform_configuration',\n 'field_value': container_properties.get('fargate_platform_configuration'),\n 'prefix': prefix,\n 'required_type': dict,\n 'validators': [\n _validate_field_type\n ]\n },\n ]\n _process_config(container_config)",
"def parse_input(input_data):\n cleaned_input = input_data.replace('@ ', '').replace(':', '')\n split_data = cleaned_input.split(' ')\n xy = split_data[1].split(',')\n wh = split_data[2].split('x')\n return FabricRectangle(\n claim_id=split_data[0],\n x=int(xy[0]),\n y=int(xy[1]),\n width=int(wh[0]),\n height=int(wh[1])\n )",
"def validate_input(self, argin):\n try:\n configuration_dict = json.loads(argin)\n _ = configuration_dict[\"id\"]\n except (KeyError, JSONDecodeError) as err:\n msg = f\"Validate configuration failed with error:{err}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n except Exception as other_errs:\n msg = f\"Validate configuration failed with unknown error:{other_errs}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n\n return (\n configuration_dict,\n ResultCode.OK,\n \"ConfigureScan arguments validation successful\",\n )",
"def validate_profile_choice(dims):\n\n if dims[0] > 1:\n profile_choice = int(input(\"Multiple profiles detected.\\nplease choose which profile to use.\\n\"))\n while profile_choice not in range(dims[0]):\n profile_choice = int(input(\"Incorrect selection.\\nplease choose {}.\\n\".format(range(dims[0])))) \n else:\n profile_choice = 0\n\n\n return profile_choice",
"def _ValidateArgs(self, args):\n if not (args.IsSpecified('description') or\n args.IsSpecified('security_policy')):\n parameter_names = ['--description', '--security_policy']\n raise exceptions.MinimumArgumentException(\n parameter_names, 'Please specify at least one property to update')",
"def test_is_valid_annotation_value_valid_input():\n # test valid label values\n assert is_valid_annotation_value(value=None)\n assert is_valid_annotation_value(value=\"\")\n assert is_valid_annotation_value(value=\"l0L\")\n assert is_valid_annotation_value(value=\"L-l\")\n assert is_valid_annotation_value(value=\"L.L\")\n assert is_valid_annotation_value(value=\"l_4\")\n assert is_valid_annotation_value(value=\"4-you\")\n assert is_valid_annotation_value(value=\"You.2\")",
"def _validate(autologging_call_input, user_call_input=None):\n\n if user_call_input is None and autologging_call_input is not None:\n _validate_new_input(autologging_call_input)\n return\n\n assert type(autologging_call_input) == type(\n user_call_input\n ), \"Type of input to original function '{}' does not match expected type '{}'\".format(\n type(autologging_call_input), type(user_call_input)\n )\n\n if type(autologging_call_input) in [list, tuple]:\n _assert_autologging_input_positional_args_are_superset(\n autologging_call_input, user_call_input\n )\n # If the autologging call input is longer than the user call input, we `zip_longest`\n # will pad the user call input with `None` values to ensure that the subsequent calls\n # to `_validate` identify new inputs added by the autologging call\n for a, u in itertools.zip_longest(autologging_call_input, user_call_input):\n _validate(a, u)\n elif type(autologging_call_input) == dict:\n _assert_autologging_input_kwargs_are_superset(autologging_call_input, user_call_input)\n for key in autologging_call_input.keys():\n _validate(autologging_call_input[key], user_call_input.get(key, None))\n else:\n assert (\n autologging_call_input is user_call_input\n or autologging_call_input == user_call_input\n ), (\n \"Input to original function does not match expected input.\"\n f\" Original: '{autologging_call_input}'. Expected: '{user_call_input}'\"\n )",
"def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )",
"def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))",
"def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? 
\".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. 
Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2",
"def test_X_approximate_distribution_is_str(self):\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )",
"def _check_annotations(value):\n if isinstance(value, dict):\n for k, v in value.items():\n _check_annotations(v)\n elif isinstance(value, list):\n for element in value:\n _check_annotations(element)\n elif isinstance(value, numpy.ndarray):\n if value.dtype not in (numpy.integer, numpy.floating, numpy.complex) \\\n and value.dtype.type != numpy.string_:\n raise ValueError(\"Invalid annotation. NumPy arrays with dtype %s are not allowed\" % value.dtype)\n elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):\n raise ValueError(\"Invalid annotation. Annotations of type %s are not allowed\" % type(value))",
"def clean_values(cls, cleaned_input, attribute):\n values_input = cleaned_input.get(cls.ATTRIBUTE_VALUES_FIELD)\n attribute_input_type = cleaned_input.get(\"input_type\") or attribute.input_type\n\n if values_input is None:\n return\n\n if (\n values_input\n and attribute_input_type not in AttributeInputType.TYPES_WITH_CHOICES\n ):\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"Values cannot be used with \"\n f\"input type {attribute_input_type}.\",\n code=AttributeErrorCode.INVALID.value,\n )\n }\n )\n\n is_swatch_attr = attribute_input_type == AttributeInputType.SWATCH\n for value_data in values_input:\n cls._validate_value(attribute, value_data, is_swatch_attr)\n\n cls.check_values_are_unique(values_input, attribute)",
"def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")",
"def test_allowed_string(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_string, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': 'female'}\n self.assertFalse(val.validate(document))",
"def _GkeNodePoolAcceleratorConfigFromArgPool(dataproc, arg_accelerators):\n accelerators = []\n for arg_accelerator in arg_accelerators.split(';'):\n if '=' not in arg_accelerator:\n raise exceptions.InvalidArgumentException(\n '--pools', 'accelerators value \"%s\" does not match the expected '\n '\"ACCELERATOR_TYPE=ACCELERATOR_VALUE\" pattern.' % arg_accelerator)\n\n accelerator_type, count_string = arg_accelerator.split('=', 1)\n try:\n count = int(count_string)\n accelerators.append(\n dataproc.messages.GkeNodePoolAcceleratorConfig(\n acceleratorCount=count,\n acceleratorType=accelerator_type,\n ))\n except ValueError:\n raise exceptions.InvalidArgumentException(\n '--pools',\n 'Unable to parse accelerators count \"%s\" as an integer.' %\n count_string)\n return accelerators",
"def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val",
"def validate_is_in(var: Any,\n var_name: str,\n list_type: Any,\n class_name: Optional[str] = None,\n log_metadata_validation_failures: bool = True) -> None:\n if var is None:\n return\n sorted_list_type = sorted(map(str, list_type))\n if log_metadata_validation_failures:\n if class_name is None:\n logging.debug(\n \"XAI Validation :: Metadata: Variable `%s` should be a member of \"\n \"`%s`\", var_name, sorted_list_type)\n else:\n logging.debug(\n \"XAI Validation :: Metadata: [%s] Variable `%s` should be a member \"\n \"of `%s`\", class_name, var_name, sorted_list_type)\n if var not in list_type:\n raise ValueError(\"{} not in {}. Got {}.\".format(\n var_name, sorted_list_type, var))",
"def comma_separated_validator(**kwargs):\n for name, param in kwargs.items():\n if param is not None:\n try:\n param.split(',')\n except AttributeError:\n raise PyYouTubeException(ErrorMessage(\n status_code=ErrorCode.INVALID_PARAMS,\n message=f'Parameter {name} must be str or comma-separated list str'\n ))",
"def __parse_args(validator, args, kwargs, enforce_type=True, enforce_shape=True, allow_extra=False): # noqa: 901\n ret = dict()\n type_errors = list()\n value_errors = list()\n argsi = 0\n extras = dict(kwargs)\n try:\n it = iter(validator)\n arg = next(it)\n # process positional arguments\n while True:\n #\n if 'default' in arg:\n break\n argname = arg['name']\n argval_set = False\n if argname in kwargs:\n argval = kwargs.get(argname)\n extras.pop(argname, None)\n argval_set = True\n elif argsi < len(args):\n argval = args[argsi]\n argval_set = True\n\n if not argval_set:\n type_errors.append(\"missing argument '%s'\" % argname)\n else:\n if argname in ret:\n type_errors.append(\"'got multiple arguments for '%s\" % argname)\n else:\n if enforce_type:\n if not __type_okay(argval, arg['type']):\n fmt_val = (argname, type(argval).__name__, __format_type(arg['type']))\n type_errors.append(\"incorrect type for '%s' (got '%s', expected '%s')\" % fmt_val)\n if enforce_shape and 'shape' in arg:\n if not __shape_okay_multi(argval, arg['shape']):\n fmt_val = (argname, get_data_shape(argval), arg['shape'])\n value_errors.append(\"incorrect shape for '%s' (got '%s, expected '%s')\" % fmt_val)\n ret[argname] = argval\n argsi += 1\n arg = next(it)\n while True:\n argname = arg['name']\n if argname in kwargs:\n ret[argname] = kwargs.get(argname)\n extras.pop(argname, None)\n elif len(args) > argsi:\n ret[argname] = args[argsi]\n argsi += 1\n else:\n ret[argname] = arg['default']\n if enforce_type:\n argval = ret[argname]\n if not __type_okay(argval, arg['type'], arg['default'] is None):\n fmt_val = (argname, type(argval).__name__, __format_type(arg['type']))\n type_errors.append(\"incorrect type for '%s' (got '%s', expected '%s')\" % fmt_val)\n if enforce_shape and 'shape' in arg:\n if not __shape_okay_multi(argval, arg['shape']):\n fmt_val = (argname, get_data_shape(argval), arg['shape'])\n value_errors.append(\"incorrect shape for '%s' (got '%s, expected '%s')\" % fmt_val)\n arg = next(it)\n except StopIteration:\n pass\n if not allow_extra:\n for key in extras.keys():\n type_errors.append(\"unrecognized argument: '%s'\" % key)\n else:\n # TODO: Extras get stripped out if function arguments are composed with fmt_docval_args.\n # allow_extra needs to be tracked on a function so that fmt_docval_args doesn't strip them out\n for key in extras.keys():\n ret[key] = extras[key]\n return {'args': ret, 'type_errors': type_errors, 'value_errors': value_errors}",
"def update_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n # read the original value passed by the command\n ksm_metric_labels_allow_list = self.context.raw_param.get(\"ksm_metric_labels_allow_list\")\n ksm_metric_annotations_allow_list = self.context.raw_param.get(\"ksm_metric_annotations_allow_list\")\n\n if ksm_metric_labels_allow_list is None:\n ksm_metric_labels_allow_list = \"\"\n if ksm_metric_annotations_allow_list is None:\n ksm_metric_annotations_allow_list = \"\"\n\n if self.context.get_enable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=True)\n mc.azure_monitor_profile.metrics.kube_state_metrics = self.models.ManagedClusterAzureMonitorProfileKubeStateMetrics( # pylint:disable=line-too-long\n metric_labels_allowlist=str(ksm_metric_labels_allow_list),\n metric_annotations_allow_list=str(ksm_metric_annotations_allow_list))\n\n if self.context.get_disable_azure_monitor_metrics():\n if mc.azure_monitor_profile is None:\n mc.azure_monitor_profile = self.models.ManagedClusterAzureMonitorProfile()\n mc.azure_monitor_profile.metrics = self.models.ManagedClusterAzureMonitorProfileMetrics(enabled=False)\n\n if (\n self.context.raw_param.get(\"enable_azure_monitor_metrics\") or\n self.context.raw_param.get(\"disable_azure_monitor_metrics\")\n ):\n self.context.external_functions.ensure_azure_monitor_profile_prerequisites(\n self.cmd,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n self.__raw_parameters,\n self.context.get_disable_azure_monitor_metrics(),\n False)\n\n return mc",
"def _validate_consumer_groups(self, val):\n\n try:\n for item in val:\n assert isinstance(item, dict)\n assert item.get(\"topic\") is not None and item.get(\"group\") is not None\n return val\n except Exception, e:\n logger.exception(e)\n raise Exception('''The `topics` value must be a mapping of mappings, like this:\n topics:\n - topic: as_nginx-access\n group: as_nginx-access-logstash\n zk_enabled: true(default)\n - topic: as_nginx-access\n group: as_nginx-access-slog-slog\n zk_enabled: false\n ''')",
"def validate_input(args: Dict[str, Any]):\n try:\n # we assume all the params to be non-empty, as cortex ensures it\n if args.get('limit') and int(args.get('limit', '1')) <= 0:\n raise ValueError(f\"Limit should be positive, limit: {args.get('limit')}\")\n\n try:\n if args.get('begin', None):\n _start_date = parser.parse(args.get('begin', '')).replace(tzinfo=pytz.UTC)\n if args.get('end', None):\n _end_date = parser.parse(args.get('end', '')).replace(tzinfo=pytz.UTC)\n except Exception as e:\n raise ValueError(\"Invalid date format received, [{}]\".format(e))\n\n if args.get('begin', None) and _start_date > datetime.now(timezone.utc):\n raise ValueError(\"Start date must be a date before or equal to current\")\n if args.get('end', None) and _end_date > datetime.now(timezone.utc):\n raise ValueError(\"End date must be a date before or equal to current\")\n if args.get('begin', None) and args.get('end', None) and _start_date > _end_date:\n raise ValueError(\"Start date cannot be after end date\")\n\n if not args.get('collection', False):\n raise ValueError(f\"Collection Name should be provided: {arg_to_number(args.get('collection', None))}\")\n\n return None\n except Exception as e:\n demisto.error(\"Exception with validating inputs [{}]\".format(e))\n raise e",
"def validate_input(self, definition):\n \"\"\"Implement your own validation logic to validate the input stanza configurations\"\"\"\n # This example accesses the modular input variable\n opt_labels = definition.parameters.get('label', None)\n pass",
"def check_input_options(args):\n\n # Make sure the input file is trimmed for use later on in the program.\n args.input_file = args.input_file.strip()\n\n # Make sure the output_type string(s) is(are) trimmed and lowercase.\n args.output_type = [x.strip().lower() for x in args.output_type]\n\n # The DPI value must be greater than zero...\n if args.dpi_val <= 0.:\n raise ValueError(\"DPI value must be > 0.\")",
"def check_input_options(args):\n\n # Make sure the input file is trimmed for use later on in the program.\n args.input_file = args.input_file.strip()\n\n # Make sure the output_type string(s) is(are) trimmed and lowercase.\n args.output_type = [x.strip().lower() for x in args.output_type]\n\n # The DPI value must be greater than zero...\n if args.dpi_val <= 0.:\n raise ValueError(\"DPI value must be > 0.\")"
] |
[
"0.5151069",
"0.48074573",
"0.47254848",
"0.46981266",
"0.46793914",
"0.46076208",
"0.46075413",
"0.45812404",
"0.45526966",
"0.45468676",
"0.45406038",
"0.45279703",
"0.44712833",
"0.44638926",
"0.4431412",
"0.44211832",
"0.4415951",
"0.4395557",
"0.4394504",
"0.4365359",
"0.43605614",
"0.43001822",
"0.4294732",
"0.4283012",
"0.42826185",
"0.42756414",
"0.4272962",
"0.4271538",
"0.42700613",
"0.42700613"
] |
0.76978266
|
0
|
Helper function to validate gmsa-related options. When enable_windows_gmsa is specified and both gmsa_dns_server and gmsa_root_domain_name are unassigned, a DecoratorEarlyExitException is raised if the user does not confirm the operation; if only one of gmsa_dns_server and gmsa_root_domain_name is assigned, a RequiredArgumentMissingError is raised. When enable_windows_gmsa is not specified, a RequiredArgumentMissingError is raised if either gmsa_dns_server or gmsa_root_domain_name is assigned.
|
def __validate_gmsa_options(
self,
enable_windows_gmsa,
gmsa_dns_server,
gmsa_root_domain_name,
yes,
) -> None:
if enable_windows_gmsa:
if gmsa_dns_server is None and gmsa_root_domain_name is None:
msg = (
"Please assure that you have set the DNS server in the vnet used by the cluster "
"when not specifying --gmsa-dns-server and --gmsa-root-domain-name"
)
if not yes and not prompt_y_n(msg, default="n"):
raise DecoratorEarlyExitException()
elif not all([gmsa_dns_server, gmsa_root_domain_name]):
raise RequiredArgumentMissingError(
"You must set or not set --gmsa-dns-server and --gmsa-root-domain-name at the same time."
)
else:
if any([gmsa_dns_server, gmsa_root_domain_name]):
raise RequiredArgumentMissingError(
"You only can set --gmsa-dns-server and --gmsa-root-domain-name "
"when setting --enable-windows-gmsa."
)
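
As a quick illustration of the decision table described in the query, here is a self-contained sketch that mirrors the three branches above. The exception classes and the confirm callback are local stand-ins (the real DecoratorEarlyExitException, RequiredArgumentMissingError, and prompt_y_n live in the Azure CLI codebase), so this is a toy under those assumptions, not the CLI's implementation.

class DecoratorEarlyExitException(Exception):  # stand-in for the CLI exception
    pass

class RequiredArgumentMissingError(Exception):  # stand-in for the CLI exception
    pass

def validate_gmsa_options(enable, dns_server, root_domain, yes, confirm=lambda: False):
    # Mirrors __validate_gmsa_options above; `confirm` stands in for prompt_y_n.
    if enable:
        if dns_server is None and root_domain is None:
            # Neither GMSA option given: proceed only on --yes or confirmation.
            if not yes and not confirm():
                raise DecoratorEarlyExitException()
        elif not all([dns_server, root_domain]):
            # Exactly one of the two options given: both or neither is required.
            raise RequiredArgumentMissingError("set both gmsa options or neither")
    elif any([dns_server, root_domain]):
        # GMSA options given without enabling Windows gMSA.
        raise RequiredArgumentMissingError("gmsa options require enable_windows_gmsa")

validate_gmsa_options(True, "10.0.0.4", "contoso.com", yes=False)  # valid: both set
validate_gmsa_options(True, None, None, yes=True)                  # valid: --yes skips the prompt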
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_windows_gmsa = self.raw_param.get(\"enable_windows_gmsa\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.enabled is not None\n ):\n enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n (\n gmsa_dns_server,\n gmsa_root_domain_name,\n ) = self._get_gmsa_dns_server_and_root_domain_name(\n enable_validation=False\n )\n self.__validate_gmsa_options(\n enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()\n )\n return enable_windows_gmsa",
"def get_enable_windows_gmsa(self) -> bool:\n return self._get_enable_windows_gmsa(enable_validation=True)",
"def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):\n # gmsa_dns_server\n # read the original value passed by the command\n gmsa_dns_server = self.raw_param.get(\"gmsa_dns_server\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_dns_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.dns_server is not None\n ):\n gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server\n gmsa_dns_read_from_mc = True\n\n # gmsa_root_domain_name\n # read the original value passed by the command\n gmsa_root_domain_name = self.raw_param.get(\"gmsa_root_domain_name\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_root_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.root_domain_name is not None\n ):\n gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name\n gmsa_root_read_from_mc = True\n\n # consistent check\n if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name \"\n \"is read from the `mc` object.\"\n )\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n self.__validate_gmsa_options(\n self._get_enable_windows_gmsa(enable_validation=False),\n gmsa_dns_server,\n gmsa_root_domain_name,\n self.get_yes(),\n )\n return gmsa_dns_server, gmsa_root_domain_name",
"def check_options(options):\n if options['batch'] not in (True, False):\n raise ValueError('User Error: Invalid argument provided for batch type.')\n if options['sigmoid_type'] not in ('norm', 'logistic', 'gumbel', 'weibull', 'quick', 'log-quick', 'hyperbolic'):\n raise ValueError('User Error: Sigmoid type specified is not one made available by the module.')\n if options['logspace'] not in (True, False):\n raise ValueError('User/Internal Error: Invalid argument for log-spacing of x-values provided.')\n if options['nafc'] not in (1,2,3,4,5,6,7,8,9,10):\n raise ValueError('Warning: Value provided for nAFC is greater than 10, or less than zero!')\n if isinstance(options['threshold'], (int, float, complex)) is False:\n raise ValueError('Please provide a numerical argument for proportion correct estimate of threshold.')\n if not 0 <= options['threshold'] <= 1:\n raise ValueError('User Error: Value provided for threshold estimate not between zero and one!')\n if isinstance(options['nafc'], int) is False:\n raise ValueError('User Error: Please provide a integer for nAFC argument.')\n if isinstance(options['param_ests'], list) is False:\n raise ValueError('User Error: Please provide a argument of type list for the parameter estimates of the model.')\n if isinstance(options['param_free'], list) is False:\n raise ValueError('''User Error: Please provide a argument of type list for the \n parameter constraints of the model.''')\n if isinstance(options['density'], int) is False:\n raise ValueError('User Error: Please provide a integer for density of grid.')",
"def opt_validate (optparser):\n (options, args) = optparser.parse_args()\n if not options.config:\n optparser.print_help()\n sys.exit(1)\n elif not options.fastq1 and not options.fastq2 and not options.in_bam:\n optparser.print_help()\n sys.exit(1)\n elif options.dataprocess[2] == \"1\" and not options.fastq1 and not options.fastq2:\n optparser.print_help()\n sys.exit(1)\n return(options)",
"def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)",
"def validate(self):\r\n for opt in self.required:\r\n if not getattr(self, opt):\r\n print \"Error: %s is not specified.\" % opt\r\n self.optp.print_help()\r\n sys.exit(1)",
"def check_common_args(args, function_name,\n valid_functions=['gaperture', 'gmap', 'gfind'],\n allow_no_coords=False):\n\n try:\n function_name = function_name.strip().lower()\n except AttributeError:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n if not function_name in valid_functions:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n try:\n args.band = args.band.strip()\n except AttributeError:\n raise SystemExit(\"Invalid band: {b}\".format(b=args.band))\n\n # This will ensure calpath has a trailing '/'.\n if function_name in ['gaperture', 'gmap']:\n args.calpath = os.path.join(args.calpath, '')\n # [Future]: Consider fixing this statement. This is breaking nosetests,\n # but it's not a bad idea...\n # if not os.path.isdir(args.calpath):\n # raise SystemExit(\"Calibration path not found: \" + args.calpath)\n\n if (not (args.ra and args.dec) and not args.skypos and\n not allow_no_coords):\n raise SystemExit(\"Must specify either both RA/DEC or SKYPOS.\")\n elif (args.ra and args.dec) and args.skypos:\n if not (args.ra == args.skypos[0] and args.dec == args.skypos[1]):\n raise SystemExit(\"Must specify either RA/DEC or SKYPOS, not both.\")\n elif (args.ra and args.dec) and not args.skypos:\n args.skypos = [args.ra, args.dec]\n elif not (args.ra and args.dec) and args.skypos:\n args.ra, args.dec = args.skypos\n\n if args.suggest and function_name in ['gfind', 'gaperture']:\n (args.ra, args.dec, args.radius, args.annulus1,\n args.annulus2) = dbt.suggest_parameters(args.band, args.skypos,\n verbose=0)\n args.skypos = [args.ra, args.dec]\n if args.verbose:\n print(\"Recentering on [\"+str(args.ra)+\", \"+str(args.dec)+\"]\")\n print(\"Setting radius to \"+str(args.radius))\n print(\"Setting annulus to [\"+str(args.annulus1)+\", \"+\n str(args.annulus2)+\"]\")\n\n if args.skypos:\n if np.array(args.skypos).shape != (2,):\n raise gPhotonArgsError(\n \"Skypos (--skypos) must be a 2-element array.\")\n args.ra, args.dec = args.skypos\n\n if args.ra and not 0. 
<= args.ra <= 360.:\n raise SystemExit(\n \"RA of {ra} does not satisfy 0 <= RA <= 360\".format(ra=args.ra))\n\n if args.dec and not -90 <= args.dec <= 90:\n raise SystemExit(\n \"Dec of {dec} does not satisfy -90 <= DEC <= 90\".format(\n dec=args.dec))\n\n if args.detsize and args.detsize <= 0.:\n raise SystemExit(\"Effective field diameter (--detsize) must be > 0\")\n\n if args.maxgap and args.maxgap <= 0.:\n raise SystemExit(\"Maximum gap length (--maxgap) must be > 0 seconds.\")\n if args.minexp and args.minexp <= 0.:\n raise SystemExit(\"Minimum valid exposure depth (--minexp) must be > 0\"\n \" seconds.\")\n\n if args.retries and args.retries <= 0.:\n raise SystemExit(\"Number of retries (--retries) must be > 0.\")\n\n # tmin / tmax must be defined and reasonable\n if not args.tmin or args.tmin <= 0.:\n raise SystemExit(\"T0 (--t0) must be > 0.\")\n if not args.tmax or args.tmax <= 0.:\n raise SystemExit(\"T1 (--t1) must be > 0.\")\n if args.tmin >= args.tmax:\n raise SystemExit(\"Minimum time (--t0) must be < maximum time (--t1).\")\n\n if args.trange:\n if np.array(args.trange).shape == (2, ):\n args.trange = [args.trange]\n if not (len(np.array(args.trange).shape) == 2 and\n np.array(args.trange).shape[1] == 2):\n raise SystemExit(\"trange (--trange) must be a pairwise list.\")\n # Individually check the entries for sanity\n for t in args.trange:\n if t[0] <= 0 or t[1] <= 0:\n raise SystemExit('Times must be positive: {t}'.format(t=t))\n if t[1] <= t[0]:\n raise SystemExit('Start time ({t0}) must preceed end time'\n ' ({t1})'.format(t0=t[0], t1=t[1]))\n elif not allow_no_coords and function_name in ['gmap', 'gaperture']:\n args.trange = dbt.fGetTimeRanges(args.band, args.skypos,\n trange=[args.tmin, args.tmax],\n maxgap=args.maxgap, minexp=args.minexp,\n detsize=args.detsize,\n skyrange=args.skyrange)\n else:\n # If no coordinates specified then use a huge time range for now.\n args.trange = [args.tmin, args.tmax]\n\n return args",
"def check_invalid_argument_usage(args):\n optim_args = ['use_adam', 'use_rmsprop', 'use_adadelta', 'use_adagrad']\n for i, o1 in enumerate(optim_args):\n if not hasattr(args, o1):\n continue\n\n for j, o2 in enumerate(optim_args):\n if i == j or not hasattr(args, o2):\n continue\n\n if getattr(args, o1) and getattr(args, o2):\n raise ValueError('Cannot simultaneously use 2 optimizers ' +\n '(arguments \"%s\" and \"%s\").' % (o1, o2))\n\n if hasattr(args, 'clip_grad_value') and hasattr(args, 'clip_grad_norm'):\n if args.clip_grad_value != -1 and args.clip_grad_norm != -1:\n raise ValueError('Cannot simultaneously clip gradiant values and ' +\n 'gradient norm.')\n\n if hasattr(args, 'cl_scenario') and hasattr(args, 'split_head_cl3'):\n if args.cl_scenario != 3 and args.split_head_cl3:\n raise ValueError('Flag \"split_head_cl3\" may only be set when ' +\n 'running CL scenario 3 (CL3)!')\n\n # TODO if `custom_network_init` is used but deactivated, then the other init\n # options have no effect -> user should be warned.\n\n ### Check consistent use of arguments from `main_net_args`.\n # FIXME These checks don't deal with prefixes yet!\n if hasattr(args, 'net_type') and hasattr(args, 'dropout_rate'):\n if args.net_type in ['resnet', 'bio_conv_net'] and \\\n args.dropout_rate != -1:\n warn('Dropout is not implement for network %s.' % args.net_type)\n\n if hasattr(args, 'net_type') and hasattr(args, 'specnorm'):\n if args.net_type in ['resnet', 'zenke', 'bio_conv_net'] and \\\n args.specnorm:\n warn('Spectral Normalization is not implement for network %s.'\n % args.net_type)\n\n if hasattr(args, 'net_type') and hasattr(args.net_act):\n if args.net_type in ['resnet', 'zenke'] and args.net_act != 'relu':\n warn('%s network uses ReLU activation functions. ' % args.net_type +\n 'Ignoring option \"net_act\".')\n\n if args.net_type in ['bio_conv_net']: # and args.net_act != 'tanh':\n warn('%s network uses Tanh activation functions. ' % args.net_type +\n 'Ignoring option \"net_act\".')\n\n if hasattr(args, 'net_type') and hasattr(args.no_bias):\n # FIXME Should be configurable for resnet in future!\n if args.net_type in ['resnet', 'zenke', 'bio_conv_net'] and \\\n args.no_bias:\n warn('%s network always uses biases!' 
% args.net_type)\n\n bn_used = False\n if hasattr(args, 'batchnorm'):\n bn_used = args.batchnorm\n elif hasattr(args, 'no_batchnorm'):\n bn_used = not args.no_batchnorm\n else:\n # We don't know whether it is used.\n bn_used = None\n\n if bn_used is not None and bn_used and hasattr(args, 'net_type'):\n if args.net_type in ['zenke', 'bio_conv_net']:\n warn('Batch Normalization is not implemented for network %s.'\n % args.net_type)\n\n if bn_used is not None and hasattr(args, 'bn_no_running_stats'):\n if not bn_used and args.bn_no_running_stats:\n warn('Option \"bn_no_running_stats\" has no effect if batch ' +\n 'normalization not activated.')\n\n if bn_used is not None and hasattr(args, 'bn_distill_stats'):\n if not bn_used and args.bn_distill_stats:\n warn('Option \"bn_distill_stats\" has no effect if batch ' +\n 'normalization not activated.')\n\n if bn_used is not None and hasattr(args, 'bn_no_stats_checkpointing'):\n if not bn_used and args.bn_no_stats_checkpointing:\n warn('Option \"bn_no_stats_checkpointing\" has no effect if batch ' +\n 'normalization not activated.')\n\n if hasattr(args, 'bn_no_stats_checkpointing') and \\\n hasattr(args, 'bn_no_running_stats') and \\\n args.bn_no_stats_checkpointing and args.bn_no_running_stats:\n raise ValueError('Options \"bn_no_stats_checkpointing\" and ' +\n '\"bn_no_running_stats\" are not compatible')\n if hasattr(args, 'bn_no_stats_checkpointing') and \\\n hasattr(args, 'bn_no_running_stats') and \\\n args.bn_no_stats_checkpointing and args.bn_distill_stats:\n raise ValueError('Options \"bn_no_running_stats\" and ' +\n '\"bn_distill_stats\" are not compatible')\n if hasattr(args, 'bn_no_running_stats') and \\\n hasattr(args, 'bn_distill_stats') and \\\n args.bn_no_stats_checkpointing and args.bn_distill_stats:\n raise ValueError('Options \"bn_no_running_stats\" and ' +\n '\"bn_distill_stats\" are not compatible')",
"def validate_shared_index_options(options):\n \n if options.vcf_phasing:\n require(all([vcf.endswith('.vcf.gz') for vcf in options.vcf_phasing]),\n 'input phasing files must end with .vcf.gz')\n if 'gbwt' in options.indexes:\n require(len(options.vcf_phasing) > 0, 'generating a GBWT requires a VCF with phasing information')\n if options.gbwt_prune:\n require(('gbwt' in options.indexes) or options.gbwt_input, '--gbwt_index or --gbwt_input required for --gbwt_prune')\n if options.vcf_phasing_regions:\n require('gbwt' in options.indexes, \"cannot hint regions to GBWT indexer without building a GBWT index\")",
"def validateOptions(self):\n SubCommand.validateOptions(self)\n if not re.match('^yes$|^no$', self.options.usedbs):\n raise ConfigurationException(\"--dbs option only accepts the yes and no values (--dbs=yes or --dbs=no)\")\n self.usedbs = 1 if self.options.usedbs == 'yes' else 0\n\n self.outdir = self.options.outdir",
"def _validate_options(options):\n if not options.pythons:\n raise Exception(\"No Pythons given - see -p.\")\n for python in options.pythons:\n if not shutil.which(python):\n raise Exception(\n \"Python %(python)s not found.\" % dict(python=python))\n if not options.requirements:\n raise Exception(\"No requirements file specified - see -r.\")\n if not os.path.exists(options.requirements):\n raise Exception(\n \"Requirements file %(req)s not found.\"\n % dict(req=options.requirements))\n if options.blacklist and not os.path.exists(options.blacklist):\n raise Exception(\n \"Blacklist file %(path)s not found.\"\n % dict(path=options.blacklist))\n version_map = {}\n for map_entry in options.version_map:\n if ':' not in map_entry:\n raise Exception(\n \"Invalid version-map entry %(map_entry)s\"\n % dict(map_entry=map_entry))\n src, dst = map_entry.split(':')\n version_map.setdefault(src, set())\n version_map[src].add(dst)\n options.version_map = version_map",
"def validate_args(args):\n if args.discovery_reports_dir:\n if not os.path.isdir(args.discovery_reports_dir):\n msg = (\n \"--discovery-reports-dir: Given path %s does not exist\" % args.discovery_reports_dir\n )\n raise argparse.ArgumentTypeError(msg)\n if args.config_dir:\n if not os.path.isdir(args.config_dir):\n msg = \"--config-dir: Given path %s does not exist\" % args.config_dir\n raise argparse.ArgumentTypeError(msg)\n if args.config and not os.path.exists(args.config):\n raise argparse.ArgumentTypeError(\"--config: Given path does not exist\")\n if args.suite_groups:\n for sc in args.suite_groups:\n if sc not in PYTEST_SUITE_GROUPS:\n raise argparse.ArgumentTypeError(\n \"suite group %s is not valid. \"\n \"Valid suite groups:%s\" % (sc, list(PYTEST_SUITE_GROUPS.keys()))\n )\n if args.use_pssh:\n if (\n args.aws_region is None\n or args.store_domain is None\n or args.store_type is None\n or args.store_id is None\n ):\n msg = f\"Need to specify --aws-region --store-domain --store-type --store-id\"\n raise argparse.ArgumentTypeError(msg)\n pytest._args = args",
"def _validate(self, args, kwargs) -> None:\n\n def error(\n exception_type: type[RegistrationError],\n arg_name: str | None = None,\n **msg_kwargs,\n ) -> None:\n if arg_name is None:\n arg_name = args[0] if args else \"<unknown>\"\n raise exception_type(self.scope, arg_name, **msg_kwargs)\n\n if not args:\n error(NoOptionNames)\n # Validate args.\n for arg in args:\n # We ban short args like `-x`, except for special casing the global option `-l`.\n if not arg.startswith(\"--\") and not (self.scope == GLOBAL_SCOPE and arg == \"-l\"):\n error(OptionNameDoubleDash, arg_name=arg)\n\n # Validate kwargs.\n if \"implicit_value\" in kwargs and kwargs[\"implicit_value\"] is None:\n error(ImplicitValIsNone)\n type_arg = kwargs.get(\"type\", str)\n if \"member_type\" in kwargs and type_arg != list:\n error(MemberTypeNotAllowed, type_=type_arg.__name__)\n member_type = kwargs.get(\"member_type\", str)\n is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)\n if not is_enum and member_type not in self._allowed_member_types:\n error(InvalidMemberType, member_type=member_type.__name__)\n\n help_arg = kwargs.get(\"help\")\n if help_arg is not None and not isinstance(help_arg, str):\n error(HelpType, help_type=type(help_arg).__name__)\n\n # check type of default value\n default_value = kwargs.get(\"default\")\n if default_value is not None:\n if isinstance(default_value, str) and type_arg != str:\n # attempt to parse default value, for correctness..\n # custom function types may implement their own validation\n default_value = self.to_value_type(default_value, type_arg, member_type)\n if hasattr(default_value, \"val\"):\n default_value = default_value.val\n\n # fall through to type check, to verify that custom types returned a value of correct type\n\n if isinstance(type_arg, type) and not isinstance(default_value, type_arg):\n error(\n DefaultValueType,\n option_type=type_arg.__name__,\n default_value=kwargs[\"default\"],\n value_type=type(default_value).__name__,\n )\n\n # verify list member types (this is not done by the custom list value type)\n if type_arg == list:\n for member_val in default_value:\n if not isinstance(member_type, type):\n # defer value validation to custom type\n member_type(member_val)\n\n elif not isinstance(member_val, member_type):\n error(\n DefaultMemberValueType,\n member_type=member_type.__name__,\n member_value=member_val,\n value_type=type(member_val).__name__,\n )\n\n if (\n \"passthrough\" in kwargs\n and kwargs[\"passthrough\"]\n and (type_arg != list or member_type not in (shell_str, str))\n ):\n error(PassthroughType)\n\n for kwarg in kwargs:\n if kwarg not in self._allowed_registration_kwargs:\n error(InvalidKwarg, kwarg=kwarg)\n\n # Ensure `daemon=True` can't be passed on non-global scopes.\n if kwarg == \"daemon\" and self._scope != GLOBAL_SCOPE:\n error(InvalidKwargNonGlobalScope, kwarg=kwarg)\n\n removal_version = kwargs.get(\"removal_version\")\n if removal_version is not None:\n validate_deprecation_semver(removal_version, \"removal version\")",
"def check_options(*options):\n def wrapper(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n if not options:\n raise ValueError(\n 'At least one option set is needed: '\n '{need}'.format(\n need=', '.join(unique_options.keys())\n )\n )\n check = [\n v for (k, v) in unique_options.items()\n if k in options\n ]\n if len(options) != len(check):\n diff = set(options) - set(unique_options.keys())\n raise ValueError(\n 'Invalid option set: {options}'.format(\n options=', '.join(diff)\n )\n )\n for unique in check:\n found = [\n k for k in kwargs.keys() if k in unique\n ]\n if not found:\n raise ValueError(\n 'At least one option is needed: {need}'.format(\n need=', '.join(unique)\n )\n )\n if len(found) > 1:\n raise ValueError(\n 'Only one option can be specified: '\n '{need}'.format(\n need=', '.join(unique),\n )\n )\n return fn(*args, **kwargs)\n return wrapped\n return wrapper",
"def check_gs_argument(ground_state):\n required_fields = [\"bc\", \"cf\", \"eci\", \"atoms\"]\n keys = ground_state.keys()\n for key in keys:\n if key not in required_fields:\n raise ValueError(\n \"The GS argument has to contain {} keys. Given {}\".format(\n required_fields, keys))",
"def checkOption(options, name, propname, optletter, encrypted=False, cdfPropname = None):\n if name not in options:\n ret = getPgaasPropValue(propname, encrypted=encrypted, dflt=None, skipComplaining=True)\n if ret is None and cdfPropname is not None:\n ret = getCdfPropValue(cdfPropname, encrypted=encrypted)\n options[name] = ret\n requireOption(\"either %s or config[%s]\" % (optletter, propname), options[name])",
"def check_options(_parser, _options):\n\n opterr = False\n\n # Check required options\n reqd_opts = [\"topic\", \"input_prefix\", \"output_prefix\", \"queue\"]\n for attr in reqd_opts:\n if not getattr(_options, attr):\n _parser.print_help()\n raise MissingRequiredOptionsException(\n \"Required option '%s' missing\" % attr)\n\n # Create mapping of all sources for which values have been supplied\n all_sources = [\"year\", \"file\", \"directory\", \"window\", \"lookback\"]\n sources = dict()\n for src in all_sources:\n opt_val = getattr(_options, src)\n if opt_val:\n sources[src] = opt_val\n\n # Check for conflicting options\n if len(sources.keys()) != 1:\n _parser.print_help()\n raise InvalidSourceException(\n \"Exactly one of these options required: [-y | -D | -f | -w | -l]\")\n\n # At this time, we've ensured that sources contains only one key. Return its\n # value\n return sources.keys()[0]",
"def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n # get aad profile from `mc`\n aad_profile = None\n if self.mc:\n aad_profile = self.mc.aad_profile\n\n # read the original value passed by the command\n aad_client_app_id = self.raw_param.get(\"aad_client_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.client_app_id is not None:\n aad_client_app_id = aad_profile.client_app_id\n\n # read the original value passed by the command\n aad_server_app_id = self.raw_param.get(\"aad_server_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_id is not None:\n aad_server_app_id = aad_profile.server_app_id\n\n # read the original value passed by the command\n aad_server_app_secret = self.raw_param.get(\"aad_server_app_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_secret is not None:\n aad_server_app_secret = aad_profile.server_app_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n enable_aad = self._get_enable_aad(enable_validation=False)\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n return aad_client_app_id, aad_server_app_id, aad_server_app_secret",
"def validateInput(self): \n if (self.options.mexURL and self.options.token): #run module through engine service\n return True\n \n if (self.options.user and self.options.pwd and self.options.root): #run module locally (note: to test module)\n return True\n \n log.debug('Botanicam: Insufficient options or arguments to start this module')\n return False",
"def validate_index_options(options):\n if len(options.indexes) > 0:\n require(len(options.graphs) == 0 or options.chroms, '--chroms must be specified for --graphs')\n require(len(options.graphs) == 1 or len(options.chroms) == len(options.graphs),\n '--chroms and --graphs must have'\n ' same number of arguments if more than one graph specified if doing anything but xg indexing')\n require(any([len(options.indexes) > 0, \n options.bwa_index_fasta]),\n 'one of --xg_index, --gcsa_index, --snarls_index, --trivial_snarls_index, --id_ranges_index, '\n '--gbwt_index, --minimizer_index, --distance_index, --all_index, --alt_path_gam_index or '\n '--bwa_index_fasta is required')\n require(not options.gbwt_prune or options.node_mapping,\n '--node_mapping required with --gbwt_prune')\n require('gbwt' not in options.indexes or not options.gbwt_input,\n 'only one of --gbwt_index and --gbwt_input can be used at a time')\n if options.gbwt_input:\n require(options.gbwt_prune == 'gbwt', '--gbwt_prune required with --gbwt_input')\n validate_shared_index_options(options)",
"def ValidateGnArgs(value):\n return gn_helpers.FromGNArgs(value)",
"def ssl_args_rewrite_validate(args):\n # Check that file paths are present and well-formed values. In that case,\n # set the user_hidden 'includes_ssl_files' flag to True.\n valid_kms_args = True\n for sslflag in SSL_FILE_FLAGS:\n argvalue = getattr(args, sslflag)\n if argvalue:\n args.includes_ssl_files = True\n if not re.match(GS_REGEX, argvalue):\n warn = '%s must be a GCS path (ex: gs://BUCKET/OBJECT)'\n warn += '\\nfound %s instead.'\n warn %= (sslflag, argvalue)\n Print.YL(warn)\n valid_kms_args = False\n # When passing SSL files, some default values may come from\n # global flags. Reset those defaults and raise errors if required\n # flags are missing.\n if args.includes_ssl_files:\n args.kms_project = args.kms_project or args.project\n if not args.kms_keyring:\n Print.YL('If including SSL files, you must set --kms-keyring.')\n valid_kms_args = False\n if not args.kms_key:\n Print.YL('If including SSL files, you must set --kms-key.')\n valid_kms_args = False\n if not valid_kms_args:\n Print.YL('SSL flags were missing or incorrect, see gce-deploy/README.md')\n sys.exit(1)\n return args",
"def ValidateInputs(lat_min, lat_max, lon_min, lon_max, lonres, latres, basepath, \\\n GFED_path, EDGAR_path, CAMS_path, behaviour_settings):\n # Assert sure extents fall within boundary\n assert -180 <= lon_min < 180 and -180 < lon_max <= 180, 'Longitude should be within range -180 -- 180!'\n assert -90 <= lat_min < 90 and -90 < lat_max <= 90, 'latitude should be within range -90 -- 90!'\n assert lon_min < lon_max, 'maximum longitude cannot be smaller than or equal to minimum!'\n assert lat_min < lat_max, 'maximum latitude cannot be smaller than or equal to minimum!'\n \n # Assert resolution is larger than TROPOMI minimum:\n assert lonres > 7, 'TROPOMI minimum longitude resolution is 7 km!'\n assert latres > 7, 'TROPOMI minimum latitude resolution is 7 km!'\n \n # Assert if given directories exist\n if behaviour_settings[1] == True:\n assert os.path.isdir(CAMS_path), f'Directory {CAMS_path} was not found!'\n if behaviour_settings[2] == True:\n assert os.path.isdir(GFED_path), f'Directory {GFED_path} was not found!'\n assert os.path.isdir(EDGAR_path), f'Directory {EDGAR_path} was not found!'\n\n \n return",
"def set_up_goma(self):\n if self.disable_goma():\n return False\n\n goma_path = self._find_goma_path()\n # We honor --cc-wrapper if it is set explicitly.\n if self.cc_wrapper():\n self._values['goma'] = False\n print '%s is used instead of Goma.' % self.cc_wrapper()\n return False\n\n if not goma_path:\n # Do not use Goma as it is not installed.\n self._values['goma'] = False\n return False\n\n self._values['goma'] = True\n self._values['cc_wrapper'] = os.path.join(goma_path, 'gomacc')\n\n if not self._goma_ctl_process:\n goma_ctl_command = [os.path.join(goma_path, 'goma_ctl.py'),\n self._get_goma_ensure_start_command()]\n # It takes about 2 seconds to run goma_ctl.py ensure_start. To\n # reduce the total time for ./configure, we run this in background\n # and check the exit status of it in the atexit handler.\n self._goma_ctl_process = subprocess.Popen(goma_ctl_command,\n stdout=subprocess.PIPE)\n atexit.register(self.wait_for_goma_ctl)\n return True",
"def _Validate(self):\n\n \n if self.cmsGenNode.applicationControls.get(\"generator\", None) == None:\n msg = \"No cmsGen generator option provided\"\n raise RuntimeError, msg\n \n return WorkflowMaker._Validate(self)",
"def check_args(args, iam='gfind', allow_no_coords=False):\n\n args = gargs.check_common_args(args, iam, allow_no_coords=allow_no_coords)\n\n return args",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def validate_args(self):\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose output\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--user\", help=\"Google user email\", default=\"none\")\n parser.add_argument(\"-p\", \"--password\", help=\"Google user email password\", default=\"none\")\n parser.add_argument(\"-l\", \"--library\", help=\"Remove duplicate songs from library\", action=\"store_true\")\n parser.add_argument(\"-y\", \"--playlist\", help=\"Remove duplicate songs from playlists\", action=\"store_true\")\n # Built-in:\n # parser.add_argument(\"-h\", \"--help\", help=\"Usage help\", action=\"store_true\")\n\n args = parser.parse_args()\n if len(self.argv) == 0 or args.user == \"none\" or args.password == \"none\" or not (args.library or args.playlist):\n parser.print_help()\n exit(0)\n\n return args",
"def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)"
] |
[
"0.658627",
"0.6262542",
"0.572155",
"0.54077303",
"0.5399864",
"0.5287891",
"0.51974815",
"0.5153438",
"0.51176727",
"0.5077888",
"0.5069212",
"0.5049037",
"0.5024756",
"0.5018313",
"0.5013415",
"0.4993465",
"0.49794912",
"0.49742782",
"0.49498725",
"0.493851",
"0.493188",
"0.49177217",
"0.49159586",
"0.49008846",
"0.48740405",
"0.487018",
"0.48246711",
"0.48222283",
"0.48190132",
"0.48182294"
] |
0.90864724
|
0
|
Helper function to obtain the value of subscription_id.
|
def get_subscription_id(self):
subscription_id = self.get_intermediate("subscription_id", None)
if not subscription_id:
subscription_id = self.cmd.cli_ctx.data.get('subscription_id')
if not subscription_id:
subscription_id = Profile(cli_ctx=self.cmd.cli_ctx).get_subscription_id()
self.cmd.cli_ctx.data['subscription_id'] = subscription_id
self.set_intermediate("subscription_id", subscription_id, overwrite_exists=True)
return subscription_id
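
The lookup order here is a small write-back cache chain: check the intermediate store, then the cli_ctx data, then fall back to a Profile call, persisting the answer on the way out. A generic sketch of that pattern, with every name hypothetical, might look like:

def resolve_cached(cache, key, providers):
    # Return cache[key] if already set; otherwise take the first non-None
    # answer from the providers, write it back to the cache, and return it.
    value = cache.get(key)
    if value is None:
        for provider in providers:
            value = provider()
            if value is not None:
                break
        cache[key] = value
    return value

# Usage (hypothetical names): resolve_cached(
#     intermediates, "subscription_id",
#     [lambda: cli_ctx_data.get("subscription_id"),
#      lambda: profile.get_subscription_id()])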
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def subscription_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subscription_id\")",
"def get_subscription_id(self):\n return self.instance_metadata.subscription_id",
"def saas_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"saas_subscription_id\")",
"def storage_account_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"storage_account_subscription_id\")",
"def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")",
"def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")",
"def get_subscription_argument(self, register_subscription_call):\n return register_subscription_call[0][0]",
"def azure_subscription_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"azure_subscription_id\")",
"def azure_subscription_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"azure_subscription_id\")",
"async def get_subscription_id(user: discord.User, redis: RedisDB):\n return await redis.get(user.id)",
"def get_user_id(self, details, response):\n return response.get(\"sub\")",
"def get_record_id(self):\n subdomain, record_id = self.key().name().split(':', 1)\n return record_id",
"def azure_subscription_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"azure_subscription_id\")",
"def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})",
"def GetITunesSubscriptionId(cls, verify_response):\n return kITunesPrefix + verify_response.GetOriginalTransactionId()",
"def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_subscription_key\")",
"def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_subscription_key\")",
"def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cognitive_service_subscription_key\")",
"def get_subscription(\n connection, subscription_id, project_id, fields=None, error_msg=None\n):\n return connection.get(\n url=f'{connection.base_url}/api/subscriptions/{subscription_id}',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n )",
"def get_session_id(cls, topic: str) -> typing.Optional[str]:\n match = re.match(AsrAudioCaptured.TOPIC_PATTERN, topic)\n assert match, \"Not an audioCaptured topic\"\n return match.group(2)",
"def __getNewSubscriptionId(self):\n while 1:\n tmp = ''.join(random.choice(ID_LETTERS) for _ in range(SID_SIZE))\n if tmp in self._subscriptions: continue\n else: return tmp",
"def subscription_type(self) -> str:\n return pulumi.get(self, \"subscription_type\")",
"def get_site_id(cls, topic: str) -> typing.Optional[str]:\n match = re.match(AsrAudioCaptured.TOPIC_PATTERN, topic)\n assert match, \"Not an audioCaptured topic\"\n return match.group(1)",
"def uuid(self):\n try:\n return self.keystore['id']\n except KeyError:\n return None",
"def subscription_name_from_path(path, project):\n return _name_from_project_path(path, project, _SUBSCRIPTION_TEMPLATE)",
"def uniqueid(self):\n return self.raw.get(\"uniqueid\")",
"async def get_subscription(\r\n self, installed_app_id: str, subscription_id: str\r\n ) -> dict:\r\n return await self.get(\r\n API_SUBSCRIPTION.format(\r\n installed_app_id=installed_app_id, subscription_id=subscription_id\r\n )\r\n )",
"def get_id(self) -> Optional[str]:\n return self.id_",
"def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")",
"def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")"
] |
[
"0.80306053",
"0.7716836",
"0.71688896",
"0.69573057",
"0.6479567",
"0.6479567",
"0.6466907",
"0.6387201",
"0.63323355",
"0.6320796",
"0.6316846",
"0.6220021",
"0.62054557",
"0.6165518",
"0.6147188",
"0.6112461",
"0.6112461",
"0.6112461",
"0.60991603",
"0.6084195",
"0.6038495",
"0.59302247",
"0.58929986",
"0.580323",
"0.58019984",
"0.57599634",
"0.5746949",
"0.56942785",
"0.5682837",
"0.56385386"
] |
0.774553
|
1
|
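The get_subscription_id entry above resolves the value through three layers: a per-operation intermediate, the shared cli_ctx.data cache, and finally the Profile object. Below is a minimal, self-contained sketch of that layered lookup; the Context class and its attributes are hypothetical stand-ins for the decorator's intermediates, the cli_ctx.data cache, and the azure-cli Profile, not the real API.

# Sketch of the three-tier lookup performed by get_subscription_id above.
# All names here are illustrative stand-ins, not azure-cli-core objects.
class Context:
    def __init__(self, profile_lookup):
        self.intermediates = {}                 # per-operation scratch space
        self.cli_ctx_data = {}                  # process-wide cache
        self._profile_lookup = profile_lookup   # most expensive source

    def get_subscription_id(self):
        sub_id = self.intermediates.get("subscription_id")
        if not sub_id:
            sub_id = self.cli_ctx_data.get("subscription_id")
        if not sub_id:
            sub_id = self._profile_lookup()     # e.g. resolves via Profile
            self.cli_ctx_data["subscription_id"] = sub_id
        # always refresh the intermediate so later getters short-circuit
        self.intermediates["subscription_id"] = sub_id
        return sub_id

ctx = Context(profile_lookup=lambda: "00000000-0000-0000-0000-000000000000")
assert ctx.get_subscription_id() == ctx.get_subscription_id()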
Internal function to dynamically obtain the value of location according to the context. When location is not assigned, dynamic completion will be triggered: the function "get_rg_location" will be called to get the location of the provided resource group, which internally uses ResourceManagementClient to send the request. This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
|
def _get_location(self, read_only: bool = False) -> Union[str, None]:
# read the original value passed by the command
location = self.raw_param.get("location")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if self.mc and self.mc.location is not None:
location = self.mc.location
read_from_mc = True
# try to read from intermediate
if location is None and self.get_intermediate("location"):
location = self.get_intermediate("location")
# skip dynamic completion & validation if option read_only is specified
if read_only:
return location
# dynamic completion
if not read_from_mc and location is None:
location = self.external_functions.get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
self.set_intermediate("location", location, overwrite_exists=True)
# this parameter does not need validation
return location
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_location(self, name, group=None):\n opt_group = OptGroup(group) if group is not None else None\n value, loc = self._do_get(name, opt_group, None)\n return loc",
"def get_location(\n self,\n ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_location\" not in self._stubs:\n self._stubs[\"get_location\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.location.Locations/GetLocation\",\n request_serializer=locations_pb2.GetLocationRequest.SerializeToString,\n response_deserializer=locations_pb2.Location.FromString,\n )\n return self._stubs[\"get_location\"]",
"def get_location(\n self,\n ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_location\" not in self._stubs:\n self._stubs[\"get_location\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.location.Locations/GetLocation\",\n request_serializer=locations_pb2.GetLocationRequest.SerializeToString,\n response_deserializer=locations_pb2.Location.FromString,\n )\n return self._stubs[\"get_location\"]",
"def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def get_location(self):\n return self._overridden_location or self.get_default_location()",
"def get_location(self):\r\n response = self.connection.make_request('GET', self.name,\r\n query_args='location')\r\n body = response.read()\r\n if response.status == 200:\r\n rs = ResultSet(self)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs.LocationConstraint\r\n else:\r\n raise self.connection.provider.storage_response_error(\r\n response.status, response.reason, body)",
"def get_location(self):\n return self.location",
"def get_location(self):\r\n return None",
"def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:\n return pulumi.get(self, \"extended_location\")",
"def _set_location(self, location=None):\n # Derive available locations\n # See https://cloud.google.com/life-sciences/docs/concepts/locations\n locations = (\n self._api.projects()\n .locations()\n .list(name=\"projects/snakemake-testing\")\n .execute()\n )\n\n locations = {x[\"locationId\"]: x[\"name\"] for x in locations.get(\"locations\", [])}\n\n # Alert the user about locations available\n logger.debug(\"locations-available:\\n%s\" % \"\\n\".join(locations))\n\n # If no locations, there is something wrong\n if not locations:\n raise WorkflowError(\"No locations found for Google Life Sciences API.\")\n\n # First pass, attempt to match the user-specified location (or prefix)\n if location:\n if location in locations:\n self.location = locations[location]\n return\n\n # It could be that a prefix was provided\n for contender in locations:\n if contender.startswith(location):\n self.location = locations[contender]\n return\n\n # If we get here and no match, alert user.\n raise WorkflowError(\n \"Location or prefix requested %s is not available.\" % location\n )\n\n # If we get here, we need to select location from regions\n for region in self.regions:\n if region in locations:\n self.location = locations[region]\n return\n\n # If we get here, choose based on prefix\n prefixes = set([r.split(\"-\")[0] for r in self.regions])\n regexp = \"^(%s)\" % \"|\".join(prefixes)\n for location in locations:\n if re.search(regexp, location):\n self.location = locations[location]\n return\n\n # If we get here, total failure of finding location\n raise WorkflowError(\n \" No locations available for regions!\"\n \" Please specify a location with --google-lifesciences-location \"\n \" or extend --google-lifesciences-regions to find a Life Sciences location.\"\n )",
"def get_location(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/location\"\n })",
"def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:\n return pulumi.get(self, \"extended_location\")",
"def get_location(self):\n\t\treturn self.location",
"def _set_location(self):\n if self._report_key == ReportTypes.MHR_REGISTRATION:\n location = self._report_data.get('location')\n if location.get('lot') or location.get('parcel') or location.get('block') or location.get('districtLot') or\\\n location.get('partOf') or location.get('section') or location.get('township') or \\\n location.get('range') or location.get('meridian') or location.get('landDistrict') or \\\n location.get('plan'):\n location['hasLTSAInfo'] = True\n else:\n location['hasLTSAInfo'] = False\n if location.get('pidNumber'):\n pid = location.get('pidNumber')\n location['pidNumber'] = pid[0:3] + '-' + pid[3:6] + '-' + pid[6:]\n elif self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT):\n for detail in self._report_data['details']:\n location = detail.get('location')\n if location.get('lot') or location.get('parcel') or location.get('block') or \\\n location.get('districtLot') or location.get('partOf') or location.get('section') or \\\n location.get('township') or location.get('range') or location.get('meridian') or \\\n location.get('landDistrict') or location.get('plan'):\n location['hasLTSAInfo'] = True\n else:\n location['hasLTSAInfo'] = False\n if location.get('pidNumber'):\n pid = location.get('pidNumber')\n location['pidNumber'] = pid[0:3] + '-' + pid[3:6] + '-' + pid[6:]",
"def location(self):\n if \"location\" in self._prop_dict:\n if isinstance(self._prop_dict[\"location\"], OneDriveObjectBase):\n return self._prop_dict[\"location\"]\n else :\n self._prop_dict[\"location\"] = Location(self._prop_dict[\"location\"])\n return self._prop_dict[\"location\"]\n\n return None",
"def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")",
"def get_location(self):\r\n request = self.request_dict()\r\n\r\n if not request:\r\n return None\r\n\r\n location = self.json_request(request)\r\n\r\n if not location:\r\n return None\r\n\r\n if location.has_key(\"access_token\"):\r\n self.access_token = location[\"access_token\"]\r\n\r\n return self.location_from_dict(location)",
"def get_location(self) -> Union[str, None]:\n return self._get_location()",
"def GetLocation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_rloc(self):\n return self.__rloc",
"def get_location(self):\r\n return self.__location",
"def LocationAttributeConfig():\n return concepts.ResourceParameterAttributeConfig(\n name='location',\n help_text='Google Cloud location for the {resource}.',\n fallthroughs=[\n deps.PropertyFallthrough(properties.VALUES.container_aws.location)\n ],\n )",
"def _get_location(self):\n return industry.Location(itemID=self.locationID, flagID=self.locationFlagID, ownerID=self.ownerID, typeID=self.locationTypeID)",
"def location(self):\r\n return self._get('location', {})",
"def extended_location(self) -> pulumi.Input['ExtendedLocationArgs']:\n return pulumi.get(self, \"extended_location\")",
"def extended_location(self) -> pulumi.Input['ExtendedLocationArgs']:\n return pulumi.get(self, \"extended_location\")",
"def get_location() -> location.SdcLocation:\n return location.SdcLocation(fac=os.getenv('ref_fac', default='r_fac'), # noqa: SIM112\n poc=os.getenv('ref_poc', default='r_poc'), # noqa: SIM112\n bed=os.getenv('ref_bed', default='r_bed')) # noqa: SIM112",
"def get_location(self):\n return self.location"
] |
[
"0.60400724",
"0.5878248",
"0.5878248",
"0.57633233",
"0.57633233",
"0.567988",
"0.5667443",
"0.5663981",
"0.5646442",
"0.5597646",
"0.55917937",
"0.5571136",
"0.55641526",
"0.547663",
"0.5475348",
"0.54751396",
"0.54690164",
"0.54690164",
"0.54402465",
"0.54215884",
"0.5377993",
"0.537552",
"0.5301474",
"0.52932554",
"0.5291948",
"0.5291214",
"0.5276855",
"0.5276855",
"0.52687216",
"0.5266541"
] |
0.757754
|
0
|
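The _get_location entry above shows the read_only escape hatch: callers that only want the currently known value can skip the remote lookup entirely. Here is a minimal, self-contained sketch of that pattern; FakeDecorator and the lookup callable are hypothetical stand-ins, whereas the real code resolves the location through ResourceManagementClient.

# Sketch of the read_only / dynamic-completion pattern in _get_location above.
class FakeDecorator:
    def __init__(self, raw_param, mc_location=None, lookup=None):
        self.raw_param = raw_param
        self.mc_location = mc_location    # value already set on the cluster object
        self.intermediates = {}
        self._lookup = lookup             # stand-in for the remote call

    def _get_location(self, read_only=False):
        location = self.raw_param.get("location")
        read_from_mc = False
        if self.mc_location is not None:
            location, read_from_mc = self.mc_location, True
        if location is None:
            location = self.intermediates.get("location")
        if read_only:                     # skip dynamic completion and caching
            return location
        if not read_from_mc and location is None:
            location = self._lookup()     # dynamic completion
            self.intermediates["location"] = location
        return location

d = FakeDecorator({"location": None}, lookup=lambda: "eastus")
assert d._get_location(read_only=True) is None   # no remote call made
assert d._get_location() == "eastus"             # completed and cached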
Obtain the value of enable_keda. This function will verify the parameter by default. If both enable_keda and disable_keda are specified, raise a MutuallyExclusiveArgumentError.
|
def get_enable_keda(self) -> bool:
return self._get_enable_keda(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_enable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_keda = self.raw_param.get(\"enable_keda\")\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"workload_auto_scaler_profile\") and # backward compatibility\n self.mc.workload_auto_scaler_profile and\n self.mc.workload_auto_scaler_profile.keda\n ):\n enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_keda and self._get_disable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return enable_keda",
"def _get_disable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_keda = self.raw_param.get(\"disable_keda\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_keda and self._get_enable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return disable_keda",
"def get_disable_keda(self) -> bool:\n return self._get_disable_keda(enable_validation=True)",
"def keda(self) -> Optional[pulumi.Input['ManagedClusterWorkloadAutoScalerProfileKedaArgs']]:\n return pulumi.get(self, \"keda\")",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def _get_enable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_vpa = self.raw_param.get(\"enable_vpa\")\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_vpa and self._get_disable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return enable_vpa",
"def _check_boolean_value(arg_dict, key):\n to_check_value = arg_dict[key].lower()\n if to_check_value in ['disabled', 'enabled']:\n return 0\n else:\n return -1",
"def enable_acceleration(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_acceleration\")",
"def enable_acceleration(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_acceleration\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def get_enable_azure_keyvault_kms(self) -> bool:\n return self._get_enable_azure_keyvault_kms(enable_validation=True)",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enabled\")"
] |
[
"0.7840266",
"0.7120032",
"0.6913669",
"0.658747",
"0.53017634",
"0.5260179",
"0.5184817",
"0.5177506",
"0.50327575",
"0.50327575",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5014237",
"0.5001445",
"0.49837804",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895",
"0.49564895"
] |
0.7669549
|
1
|
Internal function to obtain the value of enable_keda. This function supports the option of enable_validation. When enabled, if both enable_keda and disable_keda are specified, raise a MutuallyExclusiveArgumentError.
|
def _get_enable_keda(self, enable_validation: bool = False) -> bool:
# Read the original value passed by the command.
enable_keda = self.raw_param.get("enable_keda")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
hasattr(self.mc, "workload_auto_scaler_profile") and # backward compatibility
self.mc.workload_auto_scaler_profile and
self.mc.workload_auto_scaler_profile.keda
):
enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled
# This parameter does not need dynamic completion.
if enable_validation:
if enable_keda and self._get_disable_keda(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"Cannot specify --enable-keda and --disable-keda at the same time."
)
return enable_keda
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_enable_keda(self) -> bool:\n return self._get_enable_keda(enable_validation=True)",
"def _get_disable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_keda = self.raw_param.get(\"disable_keda\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_keda and self._get_enable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return disable_keda",
"def get_disable_keda(self) -> bool:\n return self._get_disable_keda(enable_validation=True)",
"def keda(self) -> Optional[pulumi.Input['ManagedClusterWorkloadAutoScalerProfileKedaArgs']]:\n return pulumi.get(self, \"keda\")",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def _get_enable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_vpa = self.raw_param.get(\"enable_vpa\")\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_vpa and self._get_disable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return enable_vpa",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def get_enable_vpa(self) -> bool:\n return self._get_enable_vpa(enable_validation=True)",
"def get_enable_azure_keyvault_kms(self) -> bool:\n return self._get_enable_azure_keyvault_kms(enable_validation=True)",
"def _get_enable_private_cluster(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_private_cluster = self.raw_param.get(\"enable_private_cluster\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.api_server_access_profile and\n self.mc.api_server_access_profile.enable_private_cluster is not None\n ):\n enable_private_cluster = self.mc.api_server_access_profile.enable_private_cluster\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n if enable_private_cluster:\n if (\n safe_lower(self._get_load_balancer_sku(enable_validation=False)) ==\n CONST_LOAD_BALANCER_SKU_BASIC\n ):\n raise InvalidArgumentValueError(\n \"Please use standard load balancer for private cluster\"\n )\n if self._get_api_server_authorized_ip_ranges(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n else:\n if self._get_disable_public_fqdn(enable_validation=False):\n raise InvalidArgumentValueError(\n \"--disable-public-fqdn should only be used with --enable-private-cluster\"\n )\n if self._get_private_dns_zone(enable_validation=False):\n raise InvalidArgumentValueError(\n \"Invalid private dns zone for public cluster. It should always be empty for public cluster\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n is_private_cluster = check_is_private_cluster(self.mc)\n if is_private_cluster:\n if self._get_api_server_authorized_ip_ranges(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n else:\n if self._get_disable_public_fqdn(enable_validation=False):\n raise InvalidArgumentValueError(\n \"--disable-public-fqdn can only be used for private cluster\"\n )\n if self._get_enable_public_fqdn(enable_validation=False):\n raise InvalidArgumentValueError(\n \"--enable-public-fqdn can only be used for private cluster\"\n )\n return enable_private_cluster",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def enabled(self) -> Optional[pulumi.Input[bool]]:\n warnings.warn(\"\"\"This field is deprecated. Leave this unset and instead configure BinaryAuthorization using evaluation_mode. If evaluation_mode is set to anything other than EVALUATION_MODE_UNSPECIFIED, this field is ignored.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"enabled is deprecated: This field is deprecated. Leave this unset and instead configure BinaryAuthorization using evaluation_mode. If evaluation_mode is set to anything other than EVALUATION_MODE_UNSPECIFIED, this field is ignored.\"\"\")\n\n return pulumi.get(self, \"enabled\")",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def _check_boolean_value(arg_dict, key):\n to_check_value = arg_dict[key].lower()\n if to_check_value in ['disabled', 'enabled']:\n return 0\n else:\n return -1",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def _get_enable_public_fqdn(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_public_fqdn = self.raw_param.get(\"enable_public_fqdn\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_public_fqdn:\n if self._get_disable_public_fqdn(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify '--enable-public-fqdn' and '--disable-public-fqdn' at the same time\"\n )\n if not check_is_private_cluster(self.mc):\n raise InvalidArgumentValueError(\n \"--enable-public-fqdn can only be used for private cluster\"\n )\n return enable_public_fqdn",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")",
"def _get_azure_keyvault_kms_key_vault_network_access(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n azure_keyvault_kms_key_vault_network_access = self.raw_param.get(\n \"azure_keyvault_kms_key_vault_network_access\"\n )\n\n # validation\n if enable_validation:\n enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms(\n enable_validation=False)\n if azure_keyvault_kms_key_vault_network_access is None:\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-network-access\" is required.')\n\n if (\n azure_keyvault_kms_key_vault_network_access and\n (\n enable_azure_keyvault_kms is None or\n enable_azure_keyvault_kms is False\n )\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-network-access\" requires \"--enable-azure-keyvault-kms\".')\n\n if azure_keyvault_kms_key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:\n key_vault_resource_id = self._get_azure_keyvault_kms_key_vault_resource_id(\n enable_validation=False)\n if (\n key_vault_resource_id is None or\n key_vault_resource_id == \"\"\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-resource-id\" is required '\n 'when \"--azure-keyvault-kms-key-vault-network-access\" is Private.'\n )\n\n return azure_keyvault_kms_key_vault_network_access",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def _get_disable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_vpa = self.raw_param.get(\"disable_vpa\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_vpa and self._get_enable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return disable_vpa"
] |
[
"0.8043966",
"0.7622176",
"0.71619093",
"0.6395778",
"0.5821647",
"0.572448",
"0.5659248",
"0.53447306",
"0.5200703",
"0.5100513",
"0.5003512",
"0.49870613",
"0.49826953",
"0.49732137",
"0.49466726",
"0.49347606",
"0.4917666",
"0.488792",
"0.48556545",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48528686",
"0.48444456",
"0.4842914",
"0.48320016"
] |
0.8436318
|
0
|
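The enable_keda and disable_keda entries above all follow the same cross-validation idiom: each validating getter checks its counterpart with validation turned off, which is what prevents the two getters from recursing into each other forever. The sketch below distills that idiom; MutuallyExclusiveArgumentError is defined locally here as a stand-in for the azure-cli-core exception of the same name.

# Sketch of the enable/disable cross-validation used by the keda getters above.
class MutuallyExclusiveArgumentError(ValueError):
    pass

class FlagContext:
    def __init__(self, raw_param):
        self.raw_param = raw_param

    def _get_enable_keda(self, enable_validation=False):
        enable_keda = self.raw_param.get("enable_keda")
        if enable_validation:
            # cross-check with validation off to avoid infinite recursion
            if enable_keda and self._get_disable_keda(enable_validation=False):
                raise MutuallyExclusiveArgumentError(
                    "Cannot specify --enable-keda and --disable-keda at the same time."
                )
        return enable_keda

    def _get_disable_keda(self, enable_validation=False):
        disable_keda = self.raw_param.get("disable_keda")
        if enable_validation:
            if disable_keda and self._get_enable_keda(enable_validation=False):
                raise MutuallyExclusiveArgumentError(
                    "Cannot specify --enable-keda and --disable-keda at the same time."
                )
        return disable_keda

ctx = FlagContext({"enable_keda": True, "disable_keda": True})
try:
    ctx._get_enable_keda(enable_validation=True)
except MutuallyExclusiveArgumentError as exc:
    print(exc)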
Obtain the value of disable_keda. This function will verify the parameter by default. If both enable_keda and disable_keda are specified, raise a MutuallyExclusiveArgumentError.
|
def get_disable_keda(self) -> bool:
return self._get_disable_keda(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_disable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_keda = self.raw_param.get(\"disable_keda\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_keda and self._get_enable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return disable_keda",
"def _get_enable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_keda = self.raw_param.get(\"enable_keda\")\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"workload_auto_scaler_profile\") and # backward compatibility\n self.mc.workload_auto_scaler_profile and\n self.mc.workload_auto_scaler_profile.keda\n ):\n enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_keda and self._get_disable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return enable_keda",
"def get_enable_keda(self) -> bool:\n return self._get_enable_keda(enable_validation=True)",
"def disable(*args, value: bool=True, **kwargs)->None:\n pass",
"def keda(self) -> Optional[pulumi.Input['ManagedClusterWorkloadAutoScalerProfileKedaArgs']]:\n return pulumi.get(self, \"keda\")",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def _get_disable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_vpa = self.raw_param.get(\"disable_vpa\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_vpa and self._get_enable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return disable_vpa",
"def is_disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def disable_keyword(self, keywords, disabler, pinned=False, exclude_id=None):\n if isinstance(keywords, (str, unicode)):\n keywords = [keywords]\n\n query_dict = { pair_data.KEYWORD: { '$in': [kw.lower() for kw in keywords] } }\n\n if exclude_id is not None:\n query_dict[pair_data.SEQUENCE] = { '$ne': exclude_id }\n \n return self._disable(query_dict, disabler, pinned)",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def disabled(name):\n return not enabled(name)",
"def is_disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_disabled\")"
] |
[
"0.78769684",
"0.7046115",
"0.7018525",
"0.5916144",
"0.580819",
"0.5683263",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.55558777",
"0.5489834",
"0.5451636",
"0.5436373",
"0.5436373",
"0.5379777",
"0.53258324",
"0.5319159",
"0.53168005",
"0.52705306",
"0.52705306",
"0.52570367",
"0.52318954"
] |
0.7675799
|
1
|
Internal function to obtain the value of disable_keda. This function supports the option of enable_validation. When enabled, if both enable_keda and disable_keda are specified, raise a MutuallyExclusiveArgumentError.
|
def _get_disable_keda(self, enable_validation: bool = False) -> bool:
# Read the original value passed by the command.
disable_keda = self.raw_param.get("disable_keda")
# This option is not supported in create mode, hence we do not read the property value from the `mc` object.
# This parameter does not need dynamic completion.
if enable_validation:
if disable_keda and self._get_enable_keda(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"Cannot specify --enable-keda and --disable-keda at the same time."
)
return disable_keda
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_disable_keda(self) -> bool:\n return self._get_disable_keda(enable_validation=True)",
"def _get_enable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_keda = self.raw_param.get(\"enable_keda\")\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"workload_auto_scaler_profile\") and # backward compatibility\n self.mc.workload_auto_scaler_profile and\n self.mc.workload_auto_scaler_profile.keda\n ):\n enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_keda and self._get_disable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return enable_keda",
"def get_enable_keda(self) -> bool:\n return self._get_enable_keda(enable_validation=True)",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def _get_disable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_vpa = self.raw_param.get(\"disable_vpa\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_vpa and self._get_enable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return disable_vpa",
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)",
"def _get_disable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n disable_rbac = self.raw_param.get(\"disable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n disable_rbac = not self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n if disable_rbac and self._get_enable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return disable_rbac",
"def keda(self) -> Optional[pulumi.Input['ManagedClusterWorkloadAutoScalerProfileKedaArgs']]:\n return pulumi.get(self, \"keda\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disabled\")",
"def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n # read the original value passed by the command\n disable_secret_rotation = self.raw_param.get(\"disable_secret_rotation\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--disable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return disable_secret_rotation",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def _get_disable_azure_monitor_metrics(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_monitor_metrics = self.raw_param.get(\"disable_azure_monitor_metrics\")\n if enable_validation:\n if disable_azure_monitor_metrics and self._get_enable_azure_monitor_metrics(False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-monitor-metrics and --disable-azure-monitor-metrics at the same time\"\n )\n return disable_azure_monitor_metrics",
"def get_disable_ahub(self) -> bool:\n return self._get_disable_ahub(enable_validation=True)",
"def disable(*args, value: bool=True, **kwargs)->None:\n pass",
"def get_disable_azure_keyvault_kms(self) -> bool:\n return self._get_disable_azure_keyvault_kms(enable_validation=True)",
"def is_disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")"
] |
[
"0.7952239",
"0.7631249",
"0.7269853",
"0.6264979",
"0.6230891",
"0.5819878",
"0.5817734",
"0.55076265",
"0.5475607",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53721267",
"0.53577685",
"0.5350409",
"0.5350409",
"0.5349147",
"0.5326625",
"0.5283759",
"0.5259885",
"0.52563065",
"0.524268"
] |
0.85147
|
0
|
Obtain the value of storage_profile.
|
def get_storage_profile(self) -> Optional[ManagedClusterStorageProfile]:
profile = self.models.ManagedClusterStorageProfile()
if self.mc.storage_profile is not None:
profile = self.mc.storage_profile
profile.disk_csi_driver = self.get_disk_driver()
profile.file_csi_driver = self.get_file_driver()
profile.blob_csi_driver = self.get_blob_driver()
profile.snapshot_controller = self.get_snapshot_controller()
return profile
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def infra_storage_profile(self) -> Optional[pulumi.Input['CloudProviderProfileInfraStorageProfileArgs']]:\n return pulumi.get(self, \"infra_storage_profile\")",
"def storage_account(self) -> str:\n return pulumi.get(self, \"storage_account\")",
"def profile(self):\n return self._profile",
"def storage_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_id\")",
"def storage_account(self) -> Optional[pulumi.Input['EventhubSpecPropertiesCaptureDescriptionDestinationStorageAccountArgs']]:\n return pulumi.get(self, \"storage_account\")",
"def get_storage(self):\n return self.storage",
"def getStorageConfig(self,storage):\n data = self.connect('get','storage/%s' % (storage),None)\n return data",
"def get_storage_profiles(self):\n return self.config[self.ROOT].keys()",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def security_profile(self) -> Optional['outputs.DiskSecurityProfileResponse']:\n return pulumi.get(self, \"security_profile\")",
"def profile(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"profile\")",
"def profile(self):\n return self.__profile",
"def _get_profile(self):\n return self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"profile\")\n )",
"def storage_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_account_id\")",
"def storage_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_type\")",
"def os_profile(self) -> pulumi.Output[Optional['outputs.OSProfileResponse']]:\n return pulumi.get(self, \"os_profile\")",
"def get_current_profile() -> Optional[Profile]:\n return _PROFILE[-1] if _PROFILE else None",
"def storage_config(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfig':\n return pulumi.get(self, \"storage_config\")",
"def get_creds(profile, conf):\n if profile:\n store = file.Storage(conf.get_profile(profile))\n return store.get()\n elif len(conf.list_profiles()) > 0:\n store = file.Storage(conf.get_profile(conf.list_profiles()[0]))\n return store.get()\n else:\n return None",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"def storage_account_id(self) -> str:\n return pulumi.get(self, \"storage_account_id\")",
"def get_profile():\n if environ['DB_INSTANCE'] in request.url_root:\n profile_id = request.form['id']\n profile = ndb.Key(Profile, profile_id).get()\n if profile is not None:\n activity_data = json.loads(profile.activity_data)\n items = activity_data.get('items', [])\n item = items[0]\n return json.dumps(item)\n \n # else (not DB_INSTANCE)\n return ''",
"def profile_data(self):\n return self._profile_data",
"def profileJS(self):\n if self._profile is None:\n self._profile = self.profileGet()\n return self._profile",
"def profile(self) -> Profile:\n return self._profile",
"def profile(self) -> Profile:\n return self._profile",
"def storage_type(self) -> str:\n return pulumi.get(self, \"storage_type\")",
"def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()",
"def storage_get(context, storage_id):\n return _storage_get(context, storage_id)"
] |
[
"0.6968962",
"0.660511",
"0.6473169",
"0.6440383",
"0.64237225",
"0.64216846",
"0.63830006",
"0.63589996",
"0.6344794",
"0.6344794",
"0.63355017",
"0.6243928",
"0.6243725",
"0.6231311",
"0.619964",
"0.6122273",
"0.61142886",
"0.61056834",
"0.61004585",
"0.60826564",
"0.6046723",
"0.60374075",
"0.6036489",
"0.6031806",
"0.59720093",
"0.59565014",
"0.59565014",
"0.5945199",
"0.5920353",
"0.5918953"
] |
0.67672837
|
1
|
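The get_storage_profile entry above composes the result: it starts from the profile already on the cluster object (or a fresh one) and then overwrites each CSI driver setting from its own dedicated getter. A minimal sketch of that composition follows; StorageProfile and Builder are hypothetical stand-ins for the SDK model and the decorator class.

# Sketch of the composition pattern in get_storage_profile above.
from dataclasses import dataclass
from typing import Optional

@dataclass
class StorageProfile:
    disk_csi_driver: Optional[bool] = None
    file_csi_driver: Optional[bool] = None

class Builder:
    def __init__(self, existing: Optional[StorageProfile] = None):
        self.existing = existing

    def get_disk_driver(self):
        return True      # stand-in for reading an --enable-disk-driver flag

    def get_file_driver(self):
        return False     # stand-in for reading a --disable-file-driver flag

    def get_storage_profile(self) -> StorageProfile:
        # start from the existing profile when present, else a fresh model
        profile = self.existing if self.existing is not None else StorageProfile()
        profile.disk_csi_driver = self.get_disk_driver()
        profile.file_csi_driver = self.get_file_driver()
        return profile

print(Builder().get_storage_profile())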
Obtain the value of vnet_subnet_id.
|
def get_vnet_subnet_id(self) -> Union[str, None]:
return self.agentpool_context.get_vnet_subnet_id()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self):\n return self._subnet_id",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_resource_id\")",
"def app_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_subnet_id\")",
"def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")",
"def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")",
"def vnet_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vnet_subnet_ids\")",
"def service_runtime_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_runtime_subnet_id\")",
"def subnet(self) -> Optional['outputs.ApiEntityReferenceResponse']:\n return pulumi.get(self, \"subnet\")",
"def get_appgw_subnet_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_ID = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_ID\")\n\n # read the original value passed by the command\n appgw_subnet_id = self.raw_param.get(\"appgw_subnet_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None\n ):\n appgw_subnet_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_id",
"def service_subnet(self) -> str:\n return pulumi.get(self, \"service_subnet\")",
"def get_virtual_network_id(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetVirtualNetworkId', self.handle)",
"def subnet_id_lookup(session, subnet_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_subnets(Filters=[{\"Name\": \"tag:Name\", \"Values\": [subnet_domain]}])\n if len(response['Subnets']) == 0:\n return None\n else:\n return response['Subnets'][0]['SubnetId']",
"def get_subnet_by_id(self, id):\n return self.network.get_subnet(id)",
"def virtual_network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_id\")",
"def virtual_network_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"virtual_network_id\")",
"def cluster_subnet(self) -> str:\n return pulumi.get(self, \"cluster_subnet\")",
"def sc_subnet(self):\n return self._sc_subnet",
"def get_aci_subnet_name(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_ADDON_NAME\")\n CONST_VIRTUAL_NODE_SUBNET_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_SUBNET_NAME\")\n\n # read the original value passed by the command\n aci_subnet_name = self.raw_param.get(\"aci_subnet_name\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None\n ):\n aci_subnet_name = self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return aci_subnet_name",
"def vnet_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vnet_name\")",
"def subnet_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> str:\n return pulumi.get(self, \"subnet_group_name\")",
"def get_subnet(self, subnet_id, **kwargs):\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = DEFAULT_SUBNET_MASK\r\n\r\n return self.subnet.getObject(id=subnet_id, **kwargs)",
"def vnet_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vnet_name\")"
] |
[
"0.8586591",
"0.8491768",
"0.8491768",
"0.8350412",
"0.827008",
"0.81594074",
"0.81481266",
"0.81481266",
"0.7513854",
"0.7413974",
"0.72993106",
"0.7277127",
"0.71174496",
"0.6944694",
"0.6929398",
"0.68267703",
"0.67024887",
"0.65791535",
"0.6576807",
"0.65344477",
"0.65188414",
"0.64953583",
"0.64650154",
"0.64001256",
"0.6278549",
"0.62717175",
"0.6236592",
"0.6177912",
"0.6135722",
"0.61293525"
] |
0.858173
|
1
|
Obtain the value of nodepool_labels.
|
def get_nodepool_labels(self) -> Union[Dict[str, str], None]:
return self.agentpool_context.get_nodepool_labels()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"node_labels\")",
"def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"node_labels\")",
"def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"node_labels\")",
"def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"node_labels\")",
"def get_labels(self):\n return self.labels",
"def plabels(self):\n return self._cache.plabels",
"def labels(self):\n return self._labels",
"def get_labels(self):\n return self.labels[1:]",
"def getLabels(self):\n return self.numToLabel",
"def get_labels(self):\r\n return None",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def node_labels(self, n_id=None):\n if n_id is None:\n return frozenset(self._nodes_by_label.keys())\n else:\n try:\n node_entry = self._nodes[n_id]\n except KeyError:\n return None\n else:\n return node_entry.labels",
"def labels_(self) -> DNDarray:\n return self._labels",
"def get_labels(self):\n raise NotImplementedError",
"def get_labels(self) -> np.ndarray:\n return self._dataset.get_labels()[self._ids]",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")",
"def get_labels(self) -> List[str]:\n return self.labels",
"def labels(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"labels\")",
"def get_val_labels(self):\n raise NotImplementedError",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")",
"def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")"
] |
[
"0.75618106",
"0.75618106",
"0.75618106",
"0.75618106",
"0.74900925",
"0.72915566",
"0.7250921",
"0.72248256",
"0.7205818",
"0.71913624",
"0.7010873",
"0.7010873",
"0.7010873",
"0.7010873",
"0.7010873",
"0.7010873",
"0.6988701",
"0.69853497",
"0.69787246",
"0.6936395",
"0.69250524",
"0.69250524",
"0.69250524",
"0.69250524",
"0.69250524",
"0.69197226",
"0.69088465",
"0.68892276",
"0.68756557",
"0.68756557"
] |
0.82592916
|
0
|
Internal function to dynamically obtain the value of dns_name_prefix according to the context. When neither dns_name_prefix nor fqdn_subdomain is assigned, dynamic completion will be triggered: a default dns_name_prefix composed of name (cluster), resource_group_name, and subscription_id will be created. This function supports the option of enable_validation; when enabled, it checks whether both dns_name_prefix and fqdn_subdomain are assigned and, if so, raises a MutuallyExclusiveArgumentError. This function also supports the option of read_only; when enabled, it skips dynamic completion and validation.
|
def _get_dns_name_prefix(
self, enable_validation: bool = False, read_only: bool = False
) -> Union[str, None]:
# read the original value passed by the command
dns_name_prefix = self.raw_param.get("dns_name_prefix")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if self.mc and self.mc.dns_prefix is not None:
dns_name_prefix = self.mc.dns_prefix
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return dns_name_prefix
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if not dns_name_prefix and not self._get_fqdn_subdomain(enable_validation=False):
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
# In case the user does not specify the parameter and it meets the conditions of automatic completion,
# necessary information is dynamically completed.
if dynamic_completion:
name = self.get_name()
resource_group_name = self.get_resource_group_name()
subscription_id = self.get_subscription_id()
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
dns_name_prefix = '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# validation
if enable_validation:
if dns_name_prefix and self._get_fqdn_subdomain(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return dns_name_prefix
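
For illustration, a minimal standalone sketch of the default-prefix composition performed above; the helper name compose_default_dns_name_prefix is hypothetical and not part of the CLI:

import re

def compose_default_dns_name_prefix(name, resource_group_name, subscription_id):
    # keep only alphanumerics and hyphens, truncate the cluster name to 10 chars
    name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
    # DNS name prefixes must start with a letter
    if not name_part[0].isalpha():
        name_part = ('a' + name_part)[0:10]
    resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
    # the subscription id suffix provides uniqueness and prevents DNS name clashes
    return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])

# compose_default_dns_name_prefix('myAKSCluster', 'myResourceGroup', '8ecadfc9aaaa')
# -> 'myAKSClust-myResourceGroup-8ecadf'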
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_dns_name_prefix(self) -> Union[str, None]:\n return self._get_dns_name_prefix(enable_validation=True)",
"def _get_fqdn_subdomain(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n fqdn_subdomain = self.raw_param.get(\"fqdn_subdomain\")\n # try to read the property value corresponding to the parameter from the `mc` object\n # Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is\n # no such attribute.\n if (\n self.mc and\n hasattr(self.mc, \"fqdn_subdomain\") and\n self.mc.fqdn_subdomain is not None\n ):\n fqdn_subdomain = self.mc.fqdn_subdomain\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if fqdn_subdomain:\n if self._get_dns_name_prefix(read_only=True):\n raise MutuallyExclusiveArgumentError(\n \"--dns-name-prefix and --fqdn-subdomain cannot be used at same time\"\n )\n private_dns_zone = self._get_private_dns_zone(enable_validation=False)\n if private_dns_zone:\n if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:\n if not is_valid_resource_id(private_dns_zone):\n raise InvalidArgumentValueError(\n private_dns_zone + \" is not a valid Azure resource ID.\"\n )\n else:\n raise InvalidArgumentValueError(\n \"--fqdn-subdomain should only be used for private cluster with custom private dns zone\"\n )\n return fqdn_subdomain",
"def cname_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname_prefix\")",
"def get_fqdn_subdomain(self) -> Union[str, None]:\n\n return self._get_fqdn_subdomain(enable_validation=True)",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):\n # gmsa_dns_server\n # read the original value passed by the command\n gmsa_dns_server = self.raw_param.get(\"gmsa_dns_server\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_dns_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.dns_server is not None\n ):\n gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server\n gmsa_dns_read_from_mc = True\n\n # gmsa_root_domain_name\n # read the original value passed by the command\n gmsa_root_domain_name = self.raw_param.get(\"gmsa_root_domain_name\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_root_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.root_domain_name is not None\n ):\n gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name\n gmsa_root_read_from_mc = True\n\n # consistent check\n if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name \"\n \"is read from the `mc` object.\"\n )\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n self.__validate_gmsa_options(\n self._get_enable_windows_gmsa(enable_validation=False),\n gmsa_dns_server,\n gmsa_root_domain_name,\n self.get_yes(),\n )\n return gmsa_dns_server, gmsa_root_domain_name",
"def cname_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cname_prefix\")",
"def set_dns(self, pardus_profile):\n\n if pardus_profile.get_name_mode() == \"default\":\n default_nameservers = \";\".join( get_default_nameservers())\n default_nameservers = default_nameservers + \";\" # Make sure addresses end with ';'\n self.ignore_auto_dns = \"true\"\n return str(default_nameservers)\n elif pardus_profile.get_name_mode() == \"custom\":\n name_server = str(pardus_profile.get_name_server())\n name_server = name_server + \";\"\n self.ignore_auto_dns = \"true\"\n return str(name_server)\n else:\n # Nothing done in auto option\n return \"none\"",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cidr: Optional[pulumi.Input[str]] = None,\n commissioning_enabled: Optional[pulumi.Input[bool]] = None,\n internet_advertising_disabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_custom_ip_prefix_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n roa_validity_end_date: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n wan_validation_signed_message: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Prefix':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _PrefixState.__new__(_PrefixState)\n\n __props__.__dict__[\"cidr\"] = cidr\n __props__.__dict__[\"commissioning_enabled\"] = commissioning_enabled\n __props__.__dict__[\"internet_advertising_disabled\"] = internet_advertising_disabled\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_custom_ip_prefix_id\"] = parent_custom_ip_prefix_id\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"roa_validity_end_date\"] = roa_validity_end_date\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"wan_validation_signed_message\"] = wan_validation_signed_message\n __props__.__dict__[\"zones\"] = zones\n return Prefix(resource_name, opts=opts, __props__=__props__)",
"def generate_config(context):\n project = context.properties['projectId']\n zone_resource_name = context.properties['resourceName']\n\n resources = []\n\n zone_resource = {\n 'name': zone_resource_name,\n # https://cloud.google.com/dns/docs/reference/v1/managedZones\n 'type': 'gcp-types/dns-v1:managedZones',\n 'properties': {\n 'description': 'Routes googleapis.com to restricted.googleapis.com VIP',\n 'dnsName': 'googleapis.com.',\n 'project': project,\n 'visibility': 'private',\n 'privateVisibilityConfig': {\n 'kind': 'dns#managedZonePrivateVisibilityConfig',\n 'networks': [{\n 'kind': 'dns#managedZonePrivateVisibilityConfigNetwork',\n 'networkUrl': context.properties['network']\n }]\n }\n }\n }\n\n # If a dependsOn property was passed in, the network should depend on that.\n if 'dependsOn' in context.properties:\n zone_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n resources.append(zone_resource)\n\n # Configure the DNS Zone. The two additions below will create Change records which will create ResourceRecordSets.\n # This follows the structure described here: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#configuring-dns\n resources.append({\n 'name': 'cname-record',\n # https://cloud.google.com/dns/docs/reference/v1/changes/create\n 'action': 'gcp-types/dns-v1:dns.changes.create',\n 'metadata': {\n 'runtimePolicy': [\n 'CREATE',\n ],\n },\n 'properties': {\n 'project': project,\n 'managedZone': '$(ref.{}.name)'.format(zone_resource_name),\n 'additions': [{\n 'name': '*.googleapis.com.',\n 'type': 'CNAME',\n 'ttl': 300,\n 'rrdatas': [ 'restricted.googleapis.com.' ]\n }]\n }\n })\n\n resources.append({\n 'name': 'a-record',\n # https://cloud.google.com/dns/docs/reference/v1/changes/create\n 'action': 'gcp-types/dns-v1:dns.changes.create',\n 'metadata': {\n 'runtimePolicy': [\n 'CREATE',\n ],\n },\n 'properties': {\n 'project': project,\n 'managedZone': '$(ref.{}.name)'.format(zone_resource_name),\n 'additions': [{\n 'name': 'restricted.googleapis.com.',\n 'type': 'A',\n 'ttl': 300,\n 'rrdatas': [\n '199.36.153.4',\n '199.36.153.5',\n '199.36.153.6',\n '199.36.153.7'\n ]\n }]\n }\n })\n\n return {'resources': resources}",
"def _get_enable_public_fqdn(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_public_fqdn = self.raw_param.get(\"enable_public_fqdn\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_public_fqdn:\n if self._get_disable_public_fqdn(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify '--enable-public-fqdn' and '--disable-public-fqdn' at the same time\"\n )\n if not check_is_private_cluster(self.mc):\n raise InvalidArgumentValueError(\n \"--enable-public-fqdn can only be used for private cluster\"\n )\n return enable_public_fqdn",
"def _get_prefix(obj):\n return obj._prefix if obj._prefix is not PREFIX_NOT_SET else DEFAULT_PREFIX",
"def service_dns_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def cluster_dns_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_dns_domain\")",
"def test_get_random_job_prefix(self):\r\n\r\n s1 = self.pw._get_random_job_prefix()\r\n s2 = self.pw._get_random_job_prefix()\r\n self.assertNotEqual(s1, s2)\r\n self.assertEqual(len(s1), 10)\r\n self.assertEqual(len(s2), 10)\r\n\r\n # different max len\r\n s1 = self.pw._get_random_job_prefix(max_job_prefix_len=22)\r\n self.assertEqual(len(s1), 22)\r\n\r\n # fixed_prefix added\r\n s1 = self.pw._get_random_job_prefix(fixed_prefix='TEST')\r\n s2 = self.pw._get_random_job_prefix(fixed_prefix='TEST')\r\n self.assertNotEqual(s1, s2)\r\n self.assertEqual(len(s1), 10)\r\n self.assertTrue(s1.startswith('TEST'))\r\n self.assertTrue(s2.startswith('TEST'))\r\n # leading/trailing underscores added\r\n self.assertTrue(s1.startswith('TEST_'))\r\n self.assertTrue(s1.endswith('_'))\r\n\r\n # no leading/trailing underscores\r\n s1 = self.pw._get_random_job_prefix(leading_trailing_underscores=False)\r\n self.assertFalse(s1.startswith('_'))\r\n self.assertFalse(s1.endswith('_'))\r\n\r\n # combo of all parameters\r\n s1 = self.pw._get_random_job_prefix(leading_trailing_underscores=False,\r\n fixed_prefix='HELLO', max_job_prefix_len=12)\r\n self.assertEqual(len(s1), 12)\r\n self.assertTrue(s1.startswith('HELLO'))\r\n self.assertFalse(s1.endswith('_'))",
"def _get_argparse_prefix(self, prefix, group_name):\n if group_name is not None:\n return group_name + '-' + prefix\n else:\n return prefix",
"def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)",
"async def prefix(self, ctx, *, new_prefix: str = None):\n\n if not ctx.guild:\n if new_prefix:\n await ctx.error(\"Prefix cannot be set in DMs.\")\n return\n\n await ctx.info(f\"Prefix is {self.bot.default_prefix}\")\n return\n\n if not new_prefix:\n guild_prefix = self.bot.prefixes.get(ctx.guild.id)\n prefix = guild_prefix or self.bot.default_prefix\n await ctx.info(f\"Prefix is {prefix}\")\n return\n\n if await checks.check_is_admin(ctx):\n await db.execute_sql(\n \"INSERT OR REPLACE INTO prefixes(guild_id, prefix)\"\n \"VALUES(?, ?)\", (ctx.guild.id, new_prefix)\n )\n\n self.bot.prefixes[ctx.guild.id] = new_prefix\n\n await ctx.info(f\"Prefix set to {new_prefix}\")\n return\n\n await ctx.error(\"Prefix can only be set by admins.\")\n return",
"async def set_prefix(self, ctx, prefix: commands.clean_content, allow_default=False):\n\n if len(prefix) > 25:\n return await ctx.send(\":no_entry: | prefixes can't be 25 characters or greater.\")\n\n if re.findall(r\"<a?:\\w*:\\d*>\", prefix):\n return await ctx.send(\":no_entry: | emoji's are not allowed as a guild's prefix\")\n\n if re.findall(r'https?://(?:[-\\\\w.]|(?:%[\\\\da-fA-F]{2}))+', prefix):\n return await ctx.send(\":no_entry: | urls are not allowed as a guild's prefix\")\n\n async with ctx.acquire():\n await ctx.db.execute(\"\"\"\n INSERT INTO guilds (guild_id, prefix, allow_default) VALUES ($1, $2, $3)\n ON CONFLICT (guild_id) DO UPDATE SET (prefix, allow_default) = ($2, $3)\n \n \"\"\", ctx.guild.id, prefix, allow_default)\n\n await ctx.send(f\"The prefix for this guild is now {prefix}\")",
"def _hostname_prefix(self, hostname_str):\n\n if not hostname_str or len(hostname_str) == 0:\n msg = _(\"Invalid Hostname: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n if isinstance(hostname_str, unicode):\n hostname_str = hostname_str.translate(\n self._unicode_host_name_filter)\n elif isinstance(hostname_str, str):\n hostname_str = hostname_str.translate(\n self._string_host_name_filter)\n else:\n msg = _(\"Cannot clean host name: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n hostname_str = str(hostname_str)\n return hostname_str[:55]",
"def add_prefix(self, field_name):\r\n return self.prefix and ('%s.%s' % (self.prefix, field_name)) or field_name",
"def dns_support_and_hostnames_enabled_validator(\n args: ResourceValidationArgs, report_violation: ReportViolation\n):\n if args.resource_type == \"aws:ec2/vpc:Vpc\":\n if args.props[\"enableDnsHostnames\"] is not True:\n report_violation(\"You didn't enable DNS hostnames for VPC: \" +\n f\"'{args.props['tags']['Name']}'\" +\n \"\\nChange value to True.\")\n if args.props[\"enableDnsSupport\"] is not True:\n report_violation(\"You didn't enable DNS support for VPC: \" +\n f\"'{args.props['tags']['Name']}'\" +\n \"\\nChange value to True.\")",
"def ip_address_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_prefix\")"
] |
[
"0.6694165",
"0.61246324",
"0.5349504",
"0.510091",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50762033",
"0.50749433",
"0.5021455",
"0.50058",
"0.49834308",
"0.4929956",
"0.49134374",
"0.49113208",
"0.4905484",
"0.48933032",
"0.48766783",
"0.48728818",
"0.48721004",
"0.48247835",
"0.4815087",
"0.48066518",
"0.47932422",
"0.4789904",
"0.47703344"
] |
0.7448428
|
0
|
Dynamically obtain the value of dns_name_prefix according to the context. When neither dns_name_prefix nor fqdn_subdomain is assigned, dynamic completion will be triggered: a default dns_name_prefix composed of name (cluster), resource_group_name, and subscription_id will be created. This function verifies the parameter by default: it checks whether both dns_name_prefix and fqdn_subdomain are assigned and, if so, raises a MutuallyExclusiveArgumentError.
|
def get_dns_name_prefix(self) -> Union[str, None]:
return self._get_dns_name_prefix(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_dns_name_prefix(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n dns_name_prefix = self.raw_param.get(\"dns_name_prefix\")\n # try to read the property value corresponding to the parameter from the `mc` object\n read_from_mc = False\n if self.mc and self.mc.dns_prefix is not None:\n dns_name_prefix = self.mc.dns_prefix\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return dns_name_prefix\n\n dynamic_completion = False\n # check whether the parameter meet the conditions of dynamic completion\n if not dns_name_prefix and not self._get_fqdn_subdomain(enable_validation=False):\n dynamic_completion = True\n # disable dynamic completion if the value is read from `mc`\n dynamic_completion = dynamic_completion and not read_from_mc\n # In case the user does not specify the parameter and it meets the conditions of automatic completion,\n # necessary information is dynamically completed.\n if dynamic_completion:\n name = self.get_name()\n resource_group_name = self.get_resource_group_name()\n subscription_id = self.get_subscription_id()\n # Use subscription id to provide uniqueness and prevent DNS name clashes\n name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]\n if not name_part[0].isalpha():\n name_part = (str('a') + name_part)[0:10]\n resource_group_part = re.sub(\n '[^A-Za-z0-9-]', '', resource_group_name)[0:16]\n dns_name_prefix = '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])\n\n # validation\n if enable_validation:\n if dns_name_prefix and self._get_fqdn_subdomain(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--dns-name-prefix and --fqdn-subdomain cannot be used at same time\"\n )\n return dns_name_prefix",
"def _get_fqdn_subdomain(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n fqdn_subdomain = self.raw_param.get(\"fqdn_subdomain\")\n # try to read the property value corresponding to the parameter from the `mc` object\n # Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is\n # no such attribute.\n if (\n self.mc and\n hasattr(self.mc, \"fqdn_subdomain\") and\n self.mc.fqdn_subdomain is not None\n ):\n fqdn_subdomain = self.mc.fqdn_subdomain\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if fqdn_subdomain:\n if self._get_dns_name_prefix(read_only=True):\n raise MutuallyExclusiveArgumentError(\n \"--dns-name-prefix and --fqdn-subdomain cannot be used at same time\"\n )\n private_dns_zone = self._get_private_dns_zone(enable_validation=False)\n if private_dns_zone:\n if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:\n if not is_valid_resource_id(private_dns_zone):\n raise InvalidArgumentValueError(\n private_dns_zone + \" is not a valid Azure resource ID.\"\n )\n else:\n raise InvalidArgumentValueError(\n \"--fqdn-subdomain should only be used for private cluster with custom private dns zone\"\n )\n return fqdn_subdomain",
"def cname_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cname_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"name_prefix\")",
"def cluster_dns_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_dns_domain\")",
"def test_get_random_job_prefix(self):\r\n\r\n s1 = self.pw._get_random_job_prefix()\r\n s2 = self.pw._get_random_job_prefix()\r\n self.assertNotEqual(s1, s2)\r\n self.assertEqual(len(s1), 10)\r\n self.assertEqual(len(s2), 10)\r\n\r\n # different max len\r\n s1 = self.pw._get_random_job_prefix(max_job_prefix_len=22)\r\n self.assertEqual(len(s1), 22)\r\n\r\n # fixed_prefix added\r\n s1 = self.pw._get_random_job_prefix(fixed_prefix='TEST')\r\n s2 = self.pw._get_random_job_prefix(fixed_prefix='TEST')\r\n self.assertNotEqual(s1, s2)\r\n self.assertEqual(len(s1), 10)\r\n self.assertTrue(s1.startswith('TEST'))\r\n self.assertTrue(s2.startswith('TEST'))\r\n # leading/trailing underscores added\r\n self.assertTrue(s1.startswith('TEST_'))\r\n self.assertTrue(s1.endswith('_'))\r\n\r\n # no leading/trailing underscores\r\n s1 = self.pw._get_random_job_prefix(leading_trailing_underscores=False)\r\n self.assertFalse(s1.startswith('_'))\r\n self.assertFalse(s1.endswith('_'))\r\n\r\n # combo of all parameters\r\n s1 = self.pw._get_random_job_prefix(leading_trailing_underscores=False,\r\n fixed_prefix='HELLO', max_job_prefix_len=12)\r\n self.assertEqual(len(s1), 12)\r\n self.assertTrue(s1.startswith('HELLO'))\r\n self.assertFalse(s1.endswith('_'))",
"def get_fqdn_subdomain(self) -> Union[str, None]:\n\n return self._get_fqdn_subdomain(enable_validation=True)",
"def cname_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cname_prefix\")",
"def service_dns_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def _hostname_prefix(self, hostname_str):\n\n if not hostname_str or len(hostname_str) == 0:\n msg = _(\"Invalid Hostname: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n if isinstance(hostname_str, unicode):\n hostname_str = hostname_str.translate(\n self._unicode_host_name_filter)\n elif isinstance(hostname_str, str):\n hostname_str = hostname_str.translate(\n self._string_host_name_filter)\n else:\n msg = _(\"Cannot clean host name: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n hostname_str = str(hostname_str)\n return hostname_str[:55]",
"def domain_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_name\")",
"def _get_argparse_prefix(self, prefix, group_name):\n if group_name is not None:\n return group_name + '-' + prefix\n else:\n return prefix",
"def target_domain_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_domain_name\")",
"def _get_unique_name(self, name: str, prefix: str):\n if name is None:\n return prefix + \"-\" + PolicyPool.__generate_random_string()\n elif name in self._active_workers.keys():\n return name + PolicyPool.__generate_random_string()\n else:\n return name",
"def dns(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns\")",
"def bucket_domain_name(self) -> str:\n ...",
"async def prefix(self, ctx, prefix: str = None):\n if not prefix:\n try:\n return await ctx.send(f'My prefix here is `{self.bot.prefixes[str(ctx.guild.id)]}`. You can change that with `{ctx.prefix}prefix <prefix>`')\n except KeyError:\n return await ctx.send(f'My prefix here is `{config.prefix[0]}`. You can change that with `{ctx.prefix}prefix <prefix>`')\n db = pymysql.connect(config.db_ip, config.db_user, config.db_pass, config.db_name)\n cur = db.cursor()\n cur.execute(\n f\"\"\"INSERT INTO settings (guildid, prefix) VALUES ({ctx.guild.id}, \"{prefix}\") ON DUPLICATE KEY UPDATE prefix = \"{prefix}\";\"\"\")\n db.commit()\n db.close()\n self.bot.prefixes = get_all_prefixes()\n await ctx.send(f':ok_hand: Successfully set my prefix here to `{prefix}`')",
"def name_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name_prefix\")",
"def ip_address_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_prefix\")",
"async def prefix(self, ctx, *, new_prefix: str = None):\n\n if not ctx.guild:\n if new_prefix:\n await ctx.error(\"Prefix cannot be set in DMs.\")\n return\n\n await ctx.info(f\"Prefix is {self.bot.default_prefix}\")\n return\n\n if not new_prefix:\n guild_prefix = self.bot.prefixes.get(ctx.guild.id)\n prefix = guild_prefix or self.bot.default_prefix\n await ctx.info(f\"Prefix is {prefix}\")\n return\n\n if await checks.check_is_admin(ctx):\n await db.execute_sql(\n \"INSERT OR REPLACE INTO prefixes(guild_id, prefix)\"\n \"VALUES(?, ?)\", (ctx.guild.id, new_prefix)\n )\n\n self.bot.prefixes[ctx.guild.id] = new_prefix\n\n await ctx.info(f\"Prefix set to {new_prefix}\")\n return\n\n await ctx.error(\"Prefix can only be set by admins.\")\n return",
"def verify_prefix(node_list, prefix, prefix_len=64, stable=True, priority='med', on_mesh=False, slaac=False, dhcp=False,\n configure=False, default_route=False, preferred=True):\n for node in node_list:\n print node.get(wpan.WPAN_THREAD_ON_MESH_PREFIXES)\n prefixes = wpan_table_parser.parse_on_mesh_prefix_result(node.get(wpan.WPAN_THREAD_ON_MESH_PREFIXES))\n for p in prefixes:\n if p.prefix == prefix:\n verify(int(p.prefix_len) == prefix_len)\n verify(p.is_stable() == stable)\n verify(p.is_on_mesh() == on_mesh)\n verify(p.is_def_route() == default_route)\n verify(p.is_slaac() == slaac)\n verify(p.is_dhcp() == dhcp)\n verify(p.is_config() == configure)\n verify(p.is_preferred() == preferred)\n verify(p.priority == priority)\n break\n else:\n print \"Did not find prefix {} on node {}\".format(prefix, node)\n exit(1)",
"def set_dns(self, pardus_profile):\n\n if pardus_profile.get_name_mode() == \"default\":\n default_nameservers = \";\".join( get_default_nameservers())\n default_nameservers = default_nameservers + \";\" # Make sure addresses end with ';'\n self.ignore_auto_dns = \"true\"\n return str(default_nameservers)\n elif pardus_profile.get_name_mode() == \"custom\":\n name_server = str(pardus_profile.get_name_server())\n name_server = name_server + \";\"\n self.ignore_auto_dns = \"true\"\n return str(name_server)\n else:\n # Nothing done in auto option\n return \"none\"",
"def connection_string_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_string_prefix\")"
] |
[
"0.682005",
"0.586163",
"0.56214464",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52388936",
"0.52154386",
"0.51832396",
"0.51107603",
"0.5110376",
"0.5059555",
"0.50069714",
"0.49874726",
"0.49273393",
"0.4908478",
"0.48107708",
"0.47861207",
"0.47708142",
"0.47607106",
"0.4759528",
"0.47587568",
"0.4757633",
"0.47462872",
"0.47430083",
"0.4739531"
] |
0.6307889
|
1
|
Obtain the value of node_osdisk_diskencryptionset_id.
|
def get_node_osdisk_diskencryptionset_id(self) -> Union[str, None]:
# read the original value passed by the command
node_osdisk_diskencryptionset_id = self.raw_param.get("node_osdisk_diskencryptionset_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.disk_encryption_set_id is not None
):
node_osdisk_diskencryptionset_id = self.mc.disk_encryption_set_id
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_diskencryptionset_id
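
The same precedence rule recurs throughout these getters: a value already present on the managed-cluster (`mc`) object overrides the raw command-line parameter. A hedged generic sketch, with hypothetical names:

def resolve_param(raw_params, mc_value, name):
    # value passed on the command line (may be None)
    value = raw_params.get(name)
    # a property already set on the `mc` object takes precedence
    if mc_value is not None:
        value = mc_value
    return value

# resolve_param({'node_osdisk_diskencryptionset_id': None}, '/subscriptions/xxx/des', 'node_osdisk_diskencryptionset_id')
# -> '/subscriptions/xxx/des'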
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_encryption_set_id\")",
"def secure_vm_disk_encryption_set_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secure_vm_disk_encryption_set_id\")",
"def disk_datastore_id(self, disk_id):\n try:\n return self.disk(disk_id).find(\"DATASTORE_ID\").text\n except AttributeError:\n return None",
"def disk_access_id(self) -> Optional[str]:\n return pulumi.get(self, \"disk_access_id\")",
"def get_id(disk):\n\n #TODO\n return \"Unknown\"",
"def disk_serial_number(self) -> str:\n return pulumi.get(self, \"disk_serial_number\")",
"def premium_data_disk_storage_account_id(self) -> str:\n return pulumi.get(self, \"premium_data_disk_storage_account_id\")",
"def data_set_id(self) -> str:\n return self.__data_set_id",
"def storage_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_id\")",
"def get_disc_number(self) -> Optional[int]:\n return self.disc_number",
"def get_id(self):\n if not self.nccl_id:\n logger.warning(\"The NCCL ID has not been \"\n \"set yet for store {}.\".format(self.name))\n return self.nccl_id",
"def type(self) -> Optional[pulumi.Input[Union[str, 'DiskEncryptionSetIdentityType']]]:\n return pulumi.get(self, \"type\")",
"def getID(self):\n return str(self._storage_id)",
"def getSerpentId(self):\n return \"{}-nat\".format(self.element.symbol.capitalize())",
"def kms_key_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kms_key_id\")",
"def source_disk_id(self) -> str:\n return pulumi.get(self, \"source_disk_id\")",
"def source_disk_id(self) -> str:\n return pulumi.get(self, \"source_disk_id\")",
"def recovery_target_protection_container_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"recovery_target_protection_container_id\")",
"def download_id(self):\n return self._download_id",
"def getSerpentId(self):\n symbol = self.element.symbol.capitalize()\n return \"{}-{}{}\".format(symbol, self.a, \"m\" if self.state else \"\")",
"def nit_sin_digito_verificacion(self):\n\n return self.identificacion.split('-')[0]",
"def codigo_desbloqueio(self):\n return self._codigo_desbloqueio",
"def partition_cfg_data_id(self) -> Optional[str]:\n return self._part_config_data_id",
"def getDeviceID(self, unitCode=0):\n resp = self.XAPCommand('DID', unitCode=unitCode)\n return int(resp)",
"def getSerpentId(self):\n raise NotImplementedError",
"def getId(self):\n return _libsbml.Deletion_getId(self)",
"def key(self):\n return str(self._id)",
"def key_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"key_id\")",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")"
] |
[
"0.67298186",
"0.64388275",
"0.59858125",
"0.5931737",
"0.587634",
"0.55603164",
"0.54654",
"0.5460655",
"0.5446653",
"0.5400962",
"0.5372715",
"0.5357355",
"0.5334524",
"0.533248",
"0.5307974",
"0.53060496",
"0.53060496",
"0.5286388",
"0.52851397",
"0.5281432",
"0.52798504",
"0.5272911",
"0.52514493",
"0.5248984",
"0.5248042",
"0.52384764",
"0.52361727",
"0.5233744",
"0.5220925",
"0.5220925"
] |
0.88044465
|
0
|
Obtain the value of ssh_key_value and no_ssh_key.
|
def get_ssh_key_value_and_no_ssh_key(self) -> Tuple[str, bool]:
# ssh_key_value
# read the original value passed by the command
raw_value = self.raw_param.get("ssh_key_value")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.ssh and
self.mc.linux_profile.ssh.public_keys
):
public_key_obj = safe_list_get(
self.mc.linux_profile.ssh.public_keys, 0, None
)
if public_key_obj:
value_obtained_from_mc = public_key_obj.key_data
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
ssh_key_value = value_obtained_from_mc
read_from_mc = True
else:
ssh_key_value = raw_value
# no_ssh_key
# read the original value passed by the command
no_ssh_key = self.raw_param.get("no_ssh_key")
# consistent check
if read_from_mc and no_ssh_key:
raise CLIInternalError(
"Inconsistent state detected, ssh_key_value is read from the `mc` object while no_ssh_key is enabled."
)
# these parameters do not need dynamic completion
# validation
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(
ssh_key_value
):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise InvalidArgumentValueError(
"Provided ssh key ({}) is invalid or non-existent".format(
shortened_key
)
)
return ssh_key_value, no_ssh_key
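
A hedged sketch of just the validation step, assuming the helper shipped in azure-cli-core (the import path may differ between versions); it mirrors the try/except shape above:

from azure.cli.core.keys import is_valid_ssh_rsa_public_key

def validate_ssh_key_or_raise(ssh_key_value):
    try:
        # an empty, malformed, or non-string key fails validation
        if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
            raise ValueError()
    except (TypeError, ValueError):
        raise ValueError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))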
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_ssh_key(self, profile):\n ssh_key = '/home/ssm-user/bastion'\n if self._value.has_option(profile, 'ssh_key'):\n ssh_key = self._value.get(profile, 'ssh_key')\n self.logger.info(\"%s is selected as a ssh user\" % ssh_key)\n return ssh_key",
"def get_ssh_key():\n path = os.environ.get(\"TUNE_CLUSTER_SSH_KEY\",\n os.path.expanduser(\"~/ray_bootstrap_key.pem\"))\n if os.path.exists(path):\n return path\n return None",
"def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError",
"def getRemoteKey(cmd, path, ip, user, passwd):\n\n sshToOtherClient(ip, user, passwd, cmd)\n showKeyCmd = 'cat %s' % (path)\n remote_key = sshToOtherClient(ip, user, passwd, showKeyCmd)\n logging.debug(\"Remote key for %s has been generated successfully : %s\",\n ip, remote_key)\n return remote_key",
"def ssh(self) -> Optional[pulumi.Input['LinuxProfilePropertiesSshArgs']]:\n return pulumi.get(self, \"ssh\")",
"def host_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"host_key\")",
"def _get_value(self, key: Text) -> Any:\n value = self._container_status_dict.get(key)\n if value is None:\n # return an empty string for printing in the log\n return \"\"\n\n return value",
"def get_value(key):\n\n oErr = ErrHandle()\n infoval = None\n try:\n obj = TsgInfo.objects.filter(infokey__iexact=key).first()\n if obj != None:\n infoval = obj.infoval\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"TsgInfo/get_value\")\n return infoval",
"def get_value(self, key):\r\n if self.hash_table[self.horner_hash(key)] is not None:\r\n if self.hash_table[self.horner_hash(key)].key == key:\r\n return self.hash_table[self.horner_hash(key)].value\r\n else:\r\n return None",
"def get_value(self, key):\n pass",
"def get_ssh():\n\n ip = str(sc.sticky[\"SSH\"]['ip'])\n port = str(sc.sticky[\"SSH\"]['port'])\n user = str(sc.sticky[\"SSH\"]['user'])\n pw = str(sc.sticky[\"SSH\"]['password'])\n\n ssh_dict = {'ip': ip, 'port': port, 'user': user, 'password': pw}\n\n return ssh_dict",
"def check_ssh_key(self):\n return True",
"def get_conf_value(self, key):\n command = [Command.ozone, \"getconf -confKey \" + key]\n exit_code, output = util.run_docker_command(command, self.om)\n return str(output).strip()",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_key\")",
"def get_key_value_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n # Search Collection for incident_id and key\n search = incident + '.key'\n result = COLLECTION.find_one({search: key}, {'_id': False})\n value = result[incident].get('value')\n contents = {\n 'Incident': incident,\n 'Key': key,\n 'Value': value,\n 'Modified': result.get(incident).get('modified')\n }\n human_readable = tableToMarkdown('The key and value that is stored for the incident', contents)\n ec = {'MongoDB.Entry(val.Key === obj.Key)': contents}\n return human_readable, ec, {}",
"def getPubKey(User):\n with settings(key_filename='/Users/eric/.ssh/id_rsa.pub', host_string=watt):\n with cd('/home/%s/.ssh' % (User)):\n auth_keyfile = sudo(\n '( [ -f authorized_keys ] && echo \"authorized_keys\" ) || ( [ -f authorized_keys2 ] && echo \"authorized_keys2\" )')\n key = sudo('head -1 %s' % auth_keyfile)\n\n return key",
"def get_value(self, key):\n if key not in self._config:\n raise ValueError(\"%s not in self.config\"%key)\n return self._config[key][\"value\"]",
"def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")",
"def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")",
"def get_value(command):\n if is_get(command) or is_delete(command):\n return None\n elif is_insert(command) or is_update(command):\n return command.split(\" \")[2]",
"def private_key(self):\n return self.__get_option('private_key')",
"def keypair_lookup(session):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_key_pairs()\n\n # If SSH_KEY exists and points to a valid Key Pair, use it\n key = os.environ.get(\"SSH_KEY\", None) # reuse bastion.py env vars\n if key is not None:\n kp_name = os.path.basename(key)\n if kp_name.endswith(\".pem\"):\n kp_name = kp_name[:-4]\n for kp in response['KeyPairs']:\n if kp[\"KeyName\"] == kp_name:\n return kp_name\n\n print(\"Key Pairs\")\n for i in range(len(response['KeyPairs'])):\n print(\"{}: {}\".format(i, response['KeyPairs'][i]['KeyName']))\n if len(response['KeyPairs']) == 0:\n return None\n while True:\n try:\n idx = input(\"[0]: \")\n idx = int(idx if len(idx) > 0 else \"0\")\n return response['KeyPairs'][idx]['KeyName']\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n print(\"Invalid Key Pair number, try again\")",
"def get_conn_env_var(self, connector, key):\n return self.env[connector]['vars'][key]['value']",
"def get_key(self, key_id):\r\n return self.sshkey.getObject(id=key_id)",
"def get_value(self, key):\n return self[key]['value']"
] |
[
"0.61679715",
"0.5973256",
"0.5830172",
"0.5803089",
"0.570957",
"0.57062227",
"0.56491816",
"0.56238395",
"0.56121117",
"0.5565539",
"0.5533316",
"0.5528271",
"0.55282676",
"0.55056614",
"0.55056614",
"0.55056614",
"0.55056614",
"0.55056614",
"0.55056614",
"0.547856",
"0.5455921",
"0.54461473",
"0.54399645",
"0.54399645",
"0.54393893",
"0.53934",
"0.5380335",
"0.5370601",
"0.5370396",
"0.5366546"
] |
0.82931334
|
0
|
Obtain the value of admin_username.
|
def get_admin_username(self) -> str:
# read the original value passed by the command
admin_username = self.raw_param.get("admin_username")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.admin_username is not None
):
admin_username = self.mc.linux_profile.admin_username
# this parameter does not need dynamic completion
# this parameter does not need validation
return admin_username
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"admin_username\")",
"def get_username(self):\r\n return self.username",
"def get_username(self):\n return self.username",
"def get_username(self):\n return str(getattr(self, self.USERNAME_FIELD))",
"def get_username(self) -> str:\n return self._username",
"def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")",
"def username(self) :\n\t\ttry :\n\t\t\treturn self._username\n\t\texcept Exception as e:\n\t\t\traise e",
"def GetUsername(self):\n return self._username",
"def admin_site_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"admin_site_name\")",
"def username(self):\n return self._query_config()['username']",
"def username(self):\n return self._username()",
"def username(self):\n return self._username",
"def username(self):\n return self._username",
"def username(self):\n return self._username",
"def getUsername(self):\n\t\treturn self.Username.lower()",
"def username(self):\n return self.user.username",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def get_username(self):\n if self.controller.oem_config:\n return 'oem'\n return self.username",
"def get_username(self):\n return self.browser.find_element(*locators.USER_NAME_TEXT).text",
"def GetUsername(self):\n pass",
"def username(self, instance):\r\n return instance.user.username",
"def get_username(self, master_id):\r\n return self._handler.get_username(master_id)",
"def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")",
"def _get_username(self):\n name = self._get_username_from_cookies()\n if name:\n return name\n if self._oauth and self._login_info[0]:\n return self._login_info[0]\n return self._get_username_from_api()",
"def get_auth_username():\n return DEFAULT_AUTH_USERNAME.get()"
] |
[
"0.8626268",
"0.8626268",
"0.8626268",
"0.855688",
"0.74516624",
"0.7449859",
"0.72956645",
"0.7219957",
"0.7098218",
"0.70965356",
"0.70321",
"0.7005626",
"0.6985179",
"0.6983305",
"0.69539434",
"0.69539434",
"0.69539434",
"0.6949018",
"0.6928104",
"0.69225544",
"0.69225544",
"0.69225544",
"0.6916941",
"0.6893553",
"0.6892086",
"0.6864221",
"0.683364",
"0.6832816",
"0.6808746",
"0.6803398"
] |
0.87167406
|
0
|
Dynamically obtain the value of windows_admin_username and windows_admin_password according to the context.
|
def get_windows_admin_username_and_password(
self,
) -> Tuple[Union[str, None], Union[str, None]]:
return self._get_windows_admin_username_and_password(enable_validation=True)
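
A minimal sketch of the prompt-based dynamic completion the internal helper performs when only one of the pair is supplied; prompt, prompt_pass, and NoTTYException come from knack.prompting, and the function name is a hypothetical stand-in:

from knack.prompting import NoTTYException, prompt, prompt_pass

def complete_windows_credentials(username, password):
    try:
        # complete the missing half of the pair interactively
        if username is None and password:
            username = prompt('windows_admin_username: ')
        if password is None and username:
            password = prompt_pass(msg='windows-admin-password: ', confirm=True)
    except NoTTYException:
        raise RuntimeError('Please specify both username and password in non-interactive mode.')
    return username, password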
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_windows_admin_password(self) -> Union[str, None]:\n # read the original value passed by the command\n windows_admin_password = self.raw_param.get(\"windows_admin_password\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return windows_admin_password",
"def _get_windows_admin_username_and_password(\n self, read_only: bool = False, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None]]:\n # windows_admin_username\n # read the original value passed by the command\n windows_admin_username = self.raw_param.get(\"windows_admin_username\")\n # try to read the property value corresponding to the parameter from the `mc` object\n username_read_from_mc = False\n if (\n self.mc and\n self.mc.windows_profile and\n self.mc.windows_profile.admin_username is not None\n ):\n windows_admin_username = self.mc.windows_profile.admin_username\n username_read_from_mc = True\n\n # windows_admin_password\n # read the original value passed by the command\n windows_admin_password = self.raw_param.get(\"windows_admin_password\")\n # try to read the property value corresponding to the parameter from the `mc` object\n password_read_from_mc = False\n if (\n self.mc and\n self.mc.windows_profile and\n self.mc.windows_profile.admin_password is not None\n ):\n windows_admin_password = self.mc.windows_profile.admin_password\n password_read_from_mc = True\n\n # consistent check\n if username_read_from_mc != password_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of windows admin name and password is read from the `mc` object.\"\n )\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return windows_admin_username, windows_admin_password\n\n username_dynamic_completion = False\n # check whether the parameter meet the conditions of dynamic completion\n # to avoid that windows_admin_password is set but windows_admin_username is not\n if windows_admin_username is None and windows_admin_password:\n username_dynamic_completion = True\n # disable dynamic completion if the value is read from `mc`\n username_dynamic_completion = (\n username_dynamic_completion and not username_read_from_mc\n )\n if username_dynamic_completion:\n try:\n windows_admin_username = prompt(\"windows_admin_username: \")\n # The validation for admin_username in ManagedClusterWindowsProfile will fail even if\n # users still set windows_admin_username to empty here\n except NoTTYException:\n raise NoTTYError(\n \"Please specify username for Windows in non-interactive mode.\"\n )\n\n password_dynamic_completion = False\n # check whether the parameter meet the conditions of dynamic completion\n # to avoid that windows_admin_username is set but windows_admin_password is not\n if windows_admin_password is None and windows_admin_username:\n password_dynamic_completion = True\n # disable dynamic completion if the value is read from `mc`\n password_dynamic_completion = (\n password_dynamic_completion and not password_read_from_mc\n )\n if password_dynamic_completion:\n try:\n windows_admin_password = prompt_pass(\n msg=\"windows-admin-password: \", confirm=True\n )\n except NoTTYException:\n raise NoTTYError(\n \"Please specify both username and password in non-interactive mode.\"\n )\n\n # validation\n # Note: The external parameters involved in the validation are not verified in their own getters.\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not any([windows_admin_username, windows_admin_password]):\n if self._get_enable_windows_gmsa(\n enable_validation=False\n ) or any(self._get_gmsa_dns_server_and_root_domain_name(\n enable_validation=False\n )):\n raise RequiredArgumentMissingError(\n \"Please set windows admin username and password before setting gmsa related configs.\"\n )\n 
return windows_admin_username, windows_admin_password",
"def get_admin_username(self) -> str:\n # read the original value passed by the command\n admin_username = self.raw_param.get(\"admin_username\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.linux_profile and\n self.mc.linux_profile.admin_username is not None\n ):\n admin_username = self.mc.linux_profile.admin_username\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return admin_username",
"def admin_credentials(self) -> pulumi.Input['AdministrativeCredentialsArgs']:\n return pulumi.get(self, \"admin_credentials\")",
"def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()",
"def getDbAdminUser():\n if \"DB_ADMIN\" in controller.CONF.keys():\n return controller.CONF[\"DB_ADMIN\"]\n\n return basedefs.DB_ADMIN",
"def admin_username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"admin_username\")",
"def nscaweb_pwd(self): \n return self.__get_option('nscaweb_pwd')",
"def newcred(self):\n return {'login': input('username: '),\n 'password': getpass.getpass()}",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")",
"def get_user():\n return os.getenv(\"USER\")",
"def _config(request):\n return request.environ['adminish']",
"def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url",
"def get_auth_username():\n return DEFAULT_AUTH_USERNAME.get()",
"def get_username():\r\n return get_creds(CREDS_FILE)[1]",
"def get_provider_credentials(provider):\n logging.info('Getting provider credentials for {}'.format(provider))\n uppercase_provider = provider.upper()\n username_variable = '{}_USERNAME'.format(uppercase_provider)\n authentication_variable = '{}_AUTHENTICATION'.format(uppercase_provider)\n username = os.environ.get(username_variable, '')\n authentication = os.environ[authentication_variable]\n return authentication, username",
"def getDbUser():\n if \"DB_ADMIN\" in controller.CONF.keys():\n return controller.CONF[\"DB_ADMIN\"]\n\n return basedefs.DB_USER",
"def credentials_from_cmd(self):\n username = raw_input(\"Email:\")\n pw = getpass.getpass()\n return username, pw",
"def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_USERNAME')",
"def _current_login_user(self):\n return self.env.uid",
"def make_shell_context():\n return {'User': User}",
"def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")",
"def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)",
"def _get_auth_string(self):",
"def get_user():\n return getpass.getuser()",
"def get_user():\n return getpass.getuser()",
"def _db_credentials(self):\n if self.password == \"\" or self.password == \"RUNTIME\":\n sys.stdout.write(PROMPT + \"Database password: \")\n sys.stdout.flush()\n self.password = getpass.getpass()\n elif self.password == \"ENV\":\n self.password = os.environ[\"sql_password\"]\n db_host = quote(self.hostname)\n db_name = quote(self.database)\n db_user = quote(self.username)\n db_password = quote_plus(self.password)\n if \"@\" in db_password:\n logging.warning(\n \"%sWARNING:%s Using the '@' symbol in your database password can cause login issues with SQL Alchemy.%s\"\n % (WARN + bold + R, W, WARN)\n )\n return db_host, db_name, db_user, db_password",
"def username(self) -> str:\n return self.get_env_var(self.username_var)"
] |
[
"0.7026003",
"0.666714",
"0.63422173",
"0.62357956",
"0.6139254",
"0.6005626",
"0.5972383",
"0.594172",
"0.59377044",
"0.59334505",
"0.59334505",
"0.59334505",
"0.59157515",
"0.5905103",
"0.58902407",
"0.5879214",
"0.5821186",
"0.5772675",
"0.576862",
"0.5750392",
"0.57445866",
"0.5736539",
"0.5735441",
"0.56872034",
"0.5664917",
"0.5657461",
"0.56230175",
"0.56230175",
"0.5612176",
"0.5598134"
] |
0.7639984
|
0
|
Obtain the value of windows_admin_password.
|
def get_windows_admin_password(self) -> Union[str, None]:
# read the original value passed by the command
windows_admin_password = self.raw_param.get("windows_admin_password")
# this parameter does not need dynamic completion
# this parameter does not need validation
return windows_admin_password
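
A minimal sketch of this read-only tier, for contrast with the validated getters in the entries below: the getter simply surfaces the raw CLI value, with no completion and no validation. The raw_param dict here is a hypothetical stand-in for the decorator context.

raw_param = {}  # hypothetical stand-in for the decorator context's raw parameters
# no dynamic completion and no validation: the raw value is returned as-is
windows_admin_password = raw_param.get("windows_admin_password")
print(windows_admin_password)  # -> None when --windows-admin-password was not passed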
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_windows_admin_username_and_password(\n self,\n ) -> Tuple[Union[str, None], Union[str, None]]:\n return self._get_windows_admin_username_and_password(enable_validation=True)",
"def admin_password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_password\")",
"def get_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password')",
"def get_password(self):\n mpw = master_pass.MPW(self.user, self.master_password)\n return mpw.password(self.ucs_server)",
"def _get_password(self):\r\n return self._password",
"def password(self) -> str:\n return self.get_env_var(self.password_var)",
"def password(self) -> str:\n return self.get_env_var(self.password_var)",
"def _get_password(self):\n return self._password",
"def get_password(self):\n return self.__password",
"def getPassword(self):\n\t\treturn self.Password",
"def GetPassword(self):\n return self._password",
"def get_auth_password():\n password = AUTH_PASSWORD_SCRIPT.get()\n if password:\n return password\n return DEFAULT_AUTH_PASSWORD.get()",
"def get_password(self) -> str:\n return self._password",
"def settings_app_password(self):\n return self._settings_app_password",
"def nscaweb_pwd(self): \n return self.__get_option('nscaweb_pwd')",
"def password(self):\n return self._password()",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def password(self) -> str:\n return pulumi.get(self, \"password\")",
"def password(self) :\n\t\ttry :\n\t\t\treturn self._password\n\t\texcept Exception as e:\n\t\t\traise e",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def password(self):\n return self._password",
"def device_password(self) -> str:\n return pulumi.get(self, \"device_password\")",
"def device_password(self) -> str:\n return pulumi.get(self, \"device_password\")",
"def _get_user_password(self):\n return self.__user_password",
"def password( self ):\n return self._password",
"def password(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"password\")",
"def password(self):\n return self.factory.server_password"
] |
[
"0.8053318",
"0.791414",
"0.73620445",
"0.7306501",
"0.725623",
"0.72475207",
"0.72475207",
"0.7243568",
"0.7101687",
"0.70582354",
"0.705318",
"0.7032517",
"0.7019539",
"0.69891137",
"0.69397044",
"0.686471",
"0.684138",
"0.684138",
"0.684138",
"0.6839299",
"0.68377525",
"0.68377525",
"0.68377525",
"0.68377525",
"0.6793273",
"0.6793273",
"0.6788647",
"0.67692864",
"0.6767862",
"0.67553186"
] |
0.8988906
|
0
|
Internal function to obtain the value of enable_ahub.
|
def _get_enable_ahub(
self, enable_validation: bool = False
) -> bool:
# read the original value passed by the command
enable_ahub = self.raw_param.get("enable_ahub")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if self.mc and self.mc.windows_profile:
enable_ahub = self.mc.windows_profile.license_type == "Windows_Server"
# this parameter does not need dynamic completion
# validation
if enable_validation:
if enable_ahub and self._get_disable_ahub(enable_validation=False):
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time'
)
return enable_ahub
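
The enable/disable pair in this and the following entries follows a fixed shape: a private getter that optionally validates, and a public wrapper that always validates. Below is a self-contained sketch, with FlagContext and the exception class as hypothetical stand-ins for the real azure-cli decorator types; note that each getter checks its counterpart with enable_validation=False, which is what keeps the two validations from recursing into each other.

class MutuallyExclusiveArgumentError(Exception):
    pass

class FlagContext:
    # hypothetical stand-in for the AKS decorator context
    def __init__(self, raw_param):
        self.raw_param = raw_param

    def _get_enable_flag(self, enable_validation=False):
        enable_flag = self.raw_param.get("enable_flag")
        if enable_validation:
            # validate against the counterpart without re-validating it (no recursion)
            if enable_flag and self._get_disable_flag(enable_validation=False):
                raise MutuallyExclusiveArgumentError(
                    "Cannot specify enable_flag and disable_flag at the same time"
                )
        return enable_flag

    def _get_disable_flag(self, enable_validation=False):
        disable_flag = self.raw_param.get("disable_flag")
        if enable_validation:
            if disable_flag and self._get_enable_flag(enable_validation=False):
                raise MutuallyExclusiveArgumentError(
                    "Cannot specify enable_flag and disable_flag at the same time"
                )
        return disable_flag

    def get_enable_flag(self):
        # the public getter always validates
        return self._get_enable_flag(enable_validation=True)

# both flags set at once trips the validation
try:
    FlagContext({"enable_flag": True, "disable_flag": True}).get_enable_flag()
except MutuallyExclusiveArgumentError as err:
    print(err)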
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_enable_ahub(self) -> bool:\n return self._get_enable_ahub(enable_validation=True)",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def get_disable_ahub(self) -> bool:\n return self._get_disable_ahub(enable_validation=True)",
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def h_a(self):\n return self._h_a",
"def enable_health(self):\n return self._enable_health",
"def GetAlpha(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetAlpha(self)",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def is_enabled(self):",
"def get_ao_manual_control_enable( channel ):\n enable = bool32(0)\n CALL('GetPhysicalChanAOManualControlEnable', channel, byref(enable))\n return bool( enable.value )",
"def get_AIA(self):\n\n return self.get_POW().getAIA()",
"def cbAIn( BoardNum, Chan, Gain, DataValue = 0 ):\n cDataValue = ctypes.c_ushort( DataValue )\n CHK( cbw.cbAIn( BoardNum, Chan, Gain, ctypes.byref( cDataValue ) ) )\n return cDataValue.value",
"def Get_HighPassFilter_Enabled(self):\r\n return self.__readFromRegisterWithDictionaryMatch(self.__REG_RW_CTRL_REG5, self.__MASK_CTRL_REG5_HPEN, self.__EnabledDict)",
"def get_enable_interval(self):\n return self.quad_enable_interval",
"def f_pha(self):\n return self._f_pha",
"def asy_hucb(gp, acq_optimiser, anc_data):\n return _halluc_ucb(gp, acq_optimiser, anc_data)",
"def get_ha_state():\n\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><high-availability><state></state></high-availability></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def adhoc(self):\n return self._adhoc",
"def _get_enable_peer_as_check(self):\n return self.__enable_peer_as_check",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def enable():\n if not _status_apf():\n return __apf_cmd(\"-s\")",
"def GetAlpha(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetAlpha(self)",
"def get_ha_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/high-availability\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def aof_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aof_enabled\")",
"def H(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.H, qubit_expr)",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def get_HA(HA_):\n HA, Self, HA_nt, HA_vt, HA_zt = 0, 0, 0, 0, 0\n if HA_ == \"HA\":\n HA = 1\n if HA_ == \"Self\":\n Self = 1\n if HA_ == \"nt\":\n HA_nt = 1\n if HA_ == \"vt\":\n HA_vt = 1\n if HA_ == \"zt\":\n HA_zt = 1\n\n return HA, Self, HA_nt, HA_vt, HA_zt"
] |
[
"0.7462605",
"0.6318883",
"0.6318883",
"0.6077914",
"0.59144866",
"0.5763726",
"0.54309964",
"0.5307891",
"0.51156825",
"0.5101385",
"0.510088",
"0.5100028",
"0.5099904",
"0.508271",
"0.507767",
"0.5060925",
"0.5052044",
"0.5033189",
"0.4997755",
"0.49873328",
"0.4957311",
"0.49562043",
"0.49509424",
"0.49330342",
"0.49112085",
"0.49051267",
"0.4903022",
"0.4891856",
"0.48608306",
"0.48586503"
] |
0.68327963
|
1
|
Obtain the value of enable_ahub.
|
def get_enable_ahub(self) -> bool:
return self._get_enable_ahub(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def get_disable_ahub(self) -> bool:\n return self._get_disable_ahub(enable_validation=True)",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def enable_health(self):\n return self._enable_health",
"def get_AIA(self):\n\n return self.get_POW().getAIA()",
"def h_a(self):\n return self._h_a",
"def aof_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aof_enabled\")",
"def get_enable_interval(self):\n return self.quad_enable_interval",
"def GetAlpha(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetAlpha(self)",
"def enabled(self):\n return self._get('enabled')",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def get_ha_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/high-availability\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def Get_HighPassFilter_Enabled(self):\r\n return self.__readFromRegisterWithDictionaryMatch(self.__REG_RW_CTRL_REG5, self.__MASK_CTRL_REG5_HPEN, self.__EnabledDict)",
"def get_ha_state():\n\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><high-availability><state></state></high-availability></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def cbAIn( BoardNum, Chan, Gain, DataValue = 0 ):\n cDataValue = ctypes.c_ushort( DataValue )\n CHK( cbw.cbAIn( BoardNum, Chan, Gain, ctypes.byref( cDataValue ) ) )\n return cDataValue.value",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def f_pha(self):\n return self._f_pha",
"def get_ao_manual_control_enable( channel ):\n enable = bool32(0)\n CALL('GetPhysicalChanAOManualControlEnable', channel, byref(enable))\n return bool( enable.value )",
"def get_AKI(self):\n\n return self.get_POW().getAKI()",
"def egu(self):\n return self.motor_egu.get()",
"def get_haiku(self):\n return self.haiku",
"def organization_enable_status(self) -> str:\n return pulumi.get(self, \"organization_enable_status\")",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def Enabled(self):\n return self._get_attribute('enabled')",
"def enable(self):\n return self._packet.get('enable', False)\n\n # TODO: TCONT and GEM lists",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True"
] |
[
"0.6801624",
"0.63087666",
"0.60537994",
"0.60537994",
"0.5973692",
"0.5859612",
"0.53436804",
"0.5278385",
"0.51970494",
"0.51906353",
"0.5175489",
"0.5152148",
"0.5133611",
"0.50854313",
"0.5080718",
"0.5071474",
"0.5058121",
"0.50457716",
"0.50438493",
"0.50340617",
"0.500907",
"0.49909663",
"0.49848425",
"0.49809766",
"0.49766693",
"0.4973337",
"0.4973337",
"0.4972751",
"0.49589702",
"0.49488625"
] |
0.7624808
|
0
|
Internal function to obtain the value of disable_ahub.
|
def _get_disable_ahub(self, enable_validation: bool = False) -> bool:
# read the original value passed by the command
disable_ahub = self.raw_param.get("disable_ahub")
# We do not support this option in create mode, therefore we do not read the value from `mc`.
# this parameter does not need dynamic completion
# validation
if enable_validation:
if disable_ahub and self._get_enable_ahub(enable_validation=False):
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time'
)
return disable_ahub
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_disable_ahub(self) -> bool:\n return self._get_disable_ahub(enable_validation=True)",
"def get_enable_ahub(self) -> bool:\n return self._get_enable_ahub(enable_validation=True)",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def get_disabled_switch(self):\n return self.disabled",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def check_disabled(self):\n return None",
"def get_utility_value(self):\n raise AIError(\"Must be implemented in child class!\")",
"def get_disable(self, btn):\n return self._disabled_buttons[btn]",
"def getDisabledPlugin(self, *args):\n return _libsbml.SBase_getDisabledPlugin(self, *args)",
"def disabled(self):\n return self._disabled",
"def disabled(self):\n return self._disabled",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def GET_optin(self, msg_hash):\r\n return self._render_opt_in_out(msg_hash, False)",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE",
"def _bidding_function(self):\n return self._bid_value",
"def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down",
"def h_a(self):\n return self._h_a",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)",
"def get_humidity_disabled(self, sensor):\n if sensor >= self. num_humidities or sensor < 0:\n raise I2CException('Illegal sensor index {} specified'.format(sensor))\n\n return self.__humidity_disabled[sensor]",
"def EnableAsyncConfSlaveFlowRemovedHardTimeOut(self):\n\t\treturn self._get_attribute('enableAsyncConfSlaveFlowRemovedHardTimeOut')",
"def disabled(name):\n return not enabled(name)",
"def disabled_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"disabled_reason\")",
"def GET_optout(self, msg_hash):\r\n return self._render_opt_in_out(msg_hash, True)",
"def value(self):\n \n print(\"Cannot calculate value for base class BSOption.\" )\n return 0"
] |
[
"0.73368704",
"0.6569043",
"0.62654716",
"0.54976517",
"0.5437569",
"0.5437569",
"0.5412723",
"0.5412723",
"0.5338816",
"0.5176854",
"0.51386905",
"0.5069727",
"0.49032664",
"0.49032664",
"0.49006057",
"0.49006057",
"0.48878363",
"0.48853374",
"0.4878255",
"0.48782134",
"0.48716727",
"0.48625875",
"0.48327598",
"0.48274723",
"0.48082003",
"0.4792174",
"0.478015",
"0.47707346",
"0.47687945",
"0.47586137"
] |
0.72198933
|
1
|
Obtain the value of disable_ahub.
|
def get_disable_ahub(self) -> bool:
return self._get_disable_ahub(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def get_enable_ahub(self) -> bool:\n return self._get_enable_ahub(enable_validation=True)",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def get_disabled_switch(self):\n return self.disabled",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def get_disable(self, btn):\n return self._disabled_buttons[btn]",
"def get_utility_value(self):\n raise AIError(\"Must be implemented in child class!\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def check_disabled(self):\n return None",
"def egu(self):\n return self.motor_egu.get()",
"def GET_optin(self, msg_hash):\r\n return self._render_opt_in_out(msg_hash, False)",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def getDisabledPlugin(self, *args):\n return _libsbml.SBase_getDisabledPlugin(self, *args)",
"def value(self):\n \n print(\"Cannot calculate value for base class BSOption.\" )\n return 0",
"def disabled(self):\n return self._disabled",
"def disabled(self):\n return self._disabled",
"def highvalue(self):\r\n return resource.HighValue(self)",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def disabled_by_microsoft(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"disabled_by_microsoft\")",
"def GET_optout(self, msg_hash):\r\n return self._render_opt_in_out(msg_hash, True)",
"def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def get_value(self):\n return None",
"def get_value(self):\r\n return 0",
"def egu(self):\n return self._egu",
"def get_humidity_disabled(self, sensor):\n if sensor >= self. num_humidities or sensor < 0:\n raise I2CException('Illegal sensor index {} specified'.format(sensor))\n\n return self.__humidity_disabled[sensor]",
"def get_enable_interval(self):\n return self.quad_enable_interval"
] |
[
"0.7102809",
"0.67173636",
"0.6186511",
"0.56364834",
"0.5617407",
"0.5617407",
"0.53054506",
"0.53054506",
"0.52797973",
"0.5225883",
"0.520139",
"0.520139",
"0.5176742",
"0.5133651",
"0.50889903",
"0.50808585",
"0.5063292",
"0.50578654",
"0.50462526",
"0.50462526",
"0.5037325",
"0.50372887",
"0.49815527",
"0.4980108",
"0.49651447",
"0.4940492",
"0.493605",
"0.4932358",
"0.49321392",
"0.48979893"
] |
0.7416804
|
0
|
Internal function to obtain the value of enable_windows_gmsa. This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for details of validation.
|
def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:
# read the original value passed by the command
enable_windows_gmsa = self.raw_param.get("enable_windows_gmsa")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
hasattr(self.mc.windows_profile, "gmsa_profile") and # backward compatibility
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.enabled is not None
):
enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled
# this parameter does not need dynamic completion
# validation
if enable_validation:
(
gmsa_dns_server,
gmsa_root_domain_name,
) = self._get_gmsa_dns_server_and_root_domain_name(
enable_validation=False
)
self.__validate_gmsa_options(
enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()
)
return enable_windows_gmsa
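
The validation deferred to __validate_gmsa_options (visible among the related entries further down) enforces: with --enable-windows-gmsa, the DNS server and root domain name must be set together or omitted together (omitting both only triggers a confirmation prompt); without it, neither may be set. A hedged, standalone restatement with a hypothetical check_gmsa helper, leaving out the interactive prompt path:

def check_gmsa(enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name):
    # hypothetical condensation of __validate_gmsa_options, minus the prompt_y_n path
    if enable_windows_gmsa:
        if any([gmsa_dns_server, gmsa_root_domain_name]) and not all([gmsa_dns_server, gmsa_root_domain_name]):
            raise ValueError("set --gmsa-dns-server and --gmsa-root-domain-name together, or neither")
    elif any([gmsa_dns_server, gmsa_root_domain_name]):
        raise ValueError("--gmsa-dns-server and --gmsa-root-domain-name require --enable-windows-gmsa")

check_gmsa(True, "10.0.0.4", "contoso.local")  # passes
check_gmsa(False, None, None)                  # passes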
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_enable_windows_gmsa(self) -> bool:\n return self._get_enable_windows_gmsa(enable_validation=True)",
"def __validate_gmsa_options(\n self,\n enable_windows_gmsa,\n gmsa_dns_server,\n gmsa_root_domain_name,\n yes,\n ) -> None:\n if enable_windows_gmsa:\n if gmsa_dns_server is None and gmsa_root_domain_name is None:\n msg = (\n \"Please assure that you have set the DNS server in the vnet used by the cluster \"\n \"when not specifying --gmsa-dns-server and --gmsa-root-domain-name\"\n )\n if not yes and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException()\n elif not all([gmsa_dns_server, gmsa_root_domain_name]):\n raise RequiredArgumentMissingError(\n \"You must set or not set --gmsa-dns-server and --gmsa-root-domain-name at the same time.\"\n )\n else:\n if any([gmsa_dns_server, gmsa_root_domain_name]):\n raise RequiredArgumentMissingError(\n \"You only can set --gmsa-dns-server and --gmsa-root-domain-name \"\n \"when setting --enable-windows-gmsa.\"\n )",
"def gmsa_profile(self) -> Optional[pulumi.Input['WindowsGmsaProfileArgs']]:\n return pulumi.get(self, \"gmsa_profile\")",
"def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):\n # gmsa_dns_server\n # read the original value passed by the command\n gmsa_dns_server = self.raw_param.get(\"gmsa_dns_server\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_dns_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.dns_server is not None\n ):\n gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server\n gmsa_dns_read_from_mc = True\n\n # gmsa_root_domain_name\n # read the original value passed by the command\n gmsa_root_domain_name = self.raw_param.get(\"gmsa_root_domain_name\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n gmsa_root_read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.root_domain_name is not None\n ):\n gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name\n gmsa_root_read_from_mc = True\n\n # consistent check\n if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name \"\n \"is read from the `mc` object.\"\n )\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n self.__validate_gmsa_options(\n self._get_enable_windows_gmsa(enable_validation=False),\n gmsa_dns_server,\n gmsa_root_domain_name,\n self.get_yes(),\n )\n return gmsa_dns_server, gmsa_root_domain_name",
"def get_enable_mapset_check():\n global enable_mapset_check\n return enable_mapset_check",
"def get_enable_vpa(self) -> bool:\n return self._get_enable_vpa(enable_validation=True)",
"def get_enable_msi_auth_for_monitoring(self) -> Union[bool, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get(\"CONST_MONITORING_USING_AAD_MSI_AUTH\")\n\n # read the original value passed by the command\n enable_msi_auth_for_monitoring = self.raw_param.get(\"enable_msi_auth_for_monitoring\")\n if (\n self.mc and\n self.mc.service_principal_profile and\n self.mc.service_principal_profile.client_id is not None\n ):\n return False\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_MONITORING_ADDON_NAME\n ).config.get(CONST_MONITORING_USING_AAD_MSI_AUTH) is not None\n ):\n enable_msi_auth_for_monitoring = (\n safe_lower(\n self.mc.addon_profiles.get(CONST_MONITORING_ADDON_NAME).config.get(\n CONST_MONITORING_USING_AAD_MSI_AUTH\n )\n ) == \"true\"\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_msi_auth_for_monitoring",
"def _get_enable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_vpa = self.raw_param.get(\"enable_vpa\")\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_vpa and self._get_disable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return enable_vpa",
"def set_up_goma(self):\n if self.disable_goma():\n return False\n\n goma_path = self._find_goma_path()\n # We honor --cc-wrapper if it is set explicitly.\n if self.cc_wrapper():\n self._values['goma'] = False\n print '%s is used instead of Goma.' % self.cc_wrapper()\n return False\n\n if not goma_path:\n # Do not use Goma as it is not installed.\n self._values['goma'] = False\n return False\n\n self._values['goma'] = True\n self._values['cc_wrapper'] = os.path.join(goma_path, 'gomacc')\n\n if not self._goma_ctl_process:\n goma_ctl_command = [os.path.join(goma_path, 'goma_ctl.py'),\n self._get_goma_ensure_start_command()]\n # It takes about 2 seconds to run goma_ctl.py ensure_start. To\n # reduce the total time for ./configure, we run this in background\n # and check the exit status of it in the atexit handler.\n self._goma_ctl_process = subprocess.Popen(goma_ctl_command,\n stdout=subprocess.PIPE)\n atexit.register(self.wait_for_goma_ctl)\n return True",
"def HasAGWFlag(self, flag):\r\n\r\n agwStyle = self.GetAGWWindowStyleFlag()\r\n res = (agwStyle & flag and [True] or [False])[0]\r\n return res",
"def GetAGWFlags(self):\r\n \r\n return self._agwFlags",
"def GetAGWFlags(self):\r\n\r\n return self._agwFlags",
"def GetAGWFlags(self):\r\n\r\n return self._agwFlags",
"def GetAGWFlags(self):\r\n\r\n return self._agwFlags",
"def GetAGWFlags(self):\r\n\r\n return self._agwFlags",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def _get_enable_azure_monitor_metrics(self, enable_validation: bool = False) -> bool:\n # print(\"_get_enable_azure_monitor_metrics being called...\")\n # Read the original value passed by the command.\n enable_azure_monitor_metrics = self.raw_param.get(\"enable_azure_monitor_metrics\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"azure_monitor_profile\") and\n self.mc.azure_monitor_profile and\n self.mc.azure_monitor_profile.metrics\n ):\n enable_azure_monitor_metrics = self.mc.azure_monitor_profile.metrics.enabled\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_azure_monitor_metrics and self._get_disable_azure_monitor_metrics(False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-monitor-metrics and --disable-azure-monitor-metrics at the same time\"\n )\n if enable_azure_monitor_metrics and not check_is_msi_cluster(self.mc):\n raise RequiredArgumentMissingError(\n \"--enable-azure-monitor-metrics can only be specified for clusters with managed identity enabled\"\n )\n return enable_azure_monitor_metrics",
"def _get_enable_local_accounts(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_local_accounts = self.raw_param.get(\"enable_local_accounts\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_local_accounts and self._get_disable_local_accounts(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --disable-local-accounts and \"\n \"--enable-local-accounts at the same time.\"\n )\n return enable_local_accounts",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def get_appgw_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = addon_consts.get(\"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\")\n\n # read the original value passed by the command\n appgw_id = self.raw_param.get(\"appgw_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None\n ):\n appgw_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_id",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def get_appgw_name(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME\"\n )\n\n # read the original value passed by the command\n appgw_name = self.raw_param.get(\"appgw_name\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME) is not None\n ):\n appgw_name = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_name",
"def GetAGWWindowStyleFlag(self):\r\n\r\n agwStyle = self._agwStyle\r\n if self._main_win:\r\n agwStyle |= self._main_win.GetAGWWindowStyleFlag()\r\n \r\n return agwStyle",
"def get_enable_sgxquotehelper(self) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\")\n\n # read the original value passed by the command\n enable_sgxquotehelper = self.raw_param.get(\"enable_sgxquotehelper\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None\n ):\n enable_sgxquotehelper = self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_sgxquotehelper",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def get_enable_azure_keyvault_kms(self) -> bool:\n return self._get_enable_azure_keyvault_kms(enable_validation=True)",
"def _GetEnableOsLoginValue(self, metadata_dict):\n instance_data, project_data = self._GetInstanceAndProjectAttributes(\n metadata_dict)\n instance_value = instance_data.get('enable-oslogin')\n project_value = project_data.get('enable-oslogin')\n value = instance_value or project_value or ''\n\n return value.lower() == 'true'",
"def GetAGWWindowStyleFlag(self):\r\n\r\n return self._agwFlags"
] |
[
"0.86491394",
"0.65857303",
"0.5831459",
"0.5364812",
"0.5286258",
"0.5143917",
"0.51292014",
"0.51047313",
"0.4965404",
"0.49371117",
"0.48884314",
"0.48789746",
"0.48789746",
"0.48789746",
"0.48789746",
"0.4873115",
"0.4856825",
"0.47568983",
"0.47324654",
"0.4715688",
"0.46679425",
"0.46663046",
"0.4663255",
"0.4565145",
"0.45322615",
"0.45136285",
"0.45029938",
"0.44823644",
"0.44582945",
"0.4420986"
] |
0.8462711
|
1
|
Internal function to obtain the values of gmsa_dns_server and gmsa_root_domain_name. This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for details of validation.
|
def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):
# gmsa_dns_server
# read the original value passed by the command
gmsa_dns_server = self.raw_param.get("gmsa_dns_server")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
gmsa_dns_read_from_mc = False
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
hasattr(self.mc.windows_profile, "gmsa_profile") and # backward compatibility
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.dns_server is not None
):
gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server
gmsa_dns_read_from_mc = True
# gmsa_root_domain_name
# read the original value passed by the command
gmsa_root_domain_name = self.raw_param.get("gmsa_root_domain_name")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
gmsa_root_read_from_mc = False
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.windows_profile and
hasattr(self.mc.windows_profile, "gmsa_profile") and # backward compatibility
self.mc.windows_profile.gmsa_profile and
self.mc.windows_profile.gmsa_profile.root_domain_name is not None
):
gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name
gmsa_root_read_from_mc = True
# consistent check
if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:
raise CLIInternalError(
"Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name "
"is read from the `mc` object."
)
# this parameter does not need dynamic completion
# validation
if enable_validation:
self.__validate_gmsa_options(
self._get_enable_windows_gmsa(enable_validation=False),
gmsa_dns_server,
gmsa_root_domain_name,
self.get_yes(),
)
return gmsa_dns_server, gmsa_root_domain_name
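
The consistent check above guards an invariant worth spelling out: a paired value set must be resolved entirely from the mc object or entirely from the raw parameters, never mixed. A small sketch with illustrative names (read_pair, the exception class), not the real implementation:

class CLIInternalError(Exception):
    pass

def read_pair(raw, mc_dns_server, mc_root_domain_name):
    # each value independently prefers what is already stored on the (hypothetical) mc object
    dns_server, dns_from_mc = raw.get("gmsa_dns_server"), False
    if mc_dns_server is not None:
        dns_server, dns_from_mc = mc_dns_server, True
    root_domain, root_from_mc = raw.get("gmsa_root_domain_name"), False
    if mc_root_domain_name is not None:
        root_domain, root_from_mc = mc_root_domain_name, True
    # the pair is only coherent when both sides came from the same source
    if dns_from_mc != root_from_mc:
        raise CLIInternalError("one of gmsa_dns_server and gmsa_root_domain_name is read from the `mc` object")
    return dns_server, root_domain

print(read_pair({"gmsa_dns_server": "10.0.0.4", "gmsa_root_domain_name": "contoso.local"}, None, None))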
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_gmsa_dns_server_and_root_domain_name(self) -> Tuple[Union[str, None], Union[str, None]]:\n return self._get_gmsa_dns_server_and_root_domain_name(enable_validation=True)",
"def __validate_gmsa_options(\n self,\n enable_windows_gmsa,\n gmsa_dns_server,\n gmsa_root_domain_name,\n yes,\n ) -> None:\n if enable_windows_gmsa:\n if gmsa_dns_server is None and gmsa_root_domain_name is None:\n msg = (\n \"Please assure that you have set the DNS server in the vnet used by the cluster \"\n \"when not specifying --gmsa-dns-server and --gmsa-root-domain-name\"\n )\n if not yes and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException()\n elif not all([gmsa_dns_server, gmsa_root_domain_name]):\n raise RequiredArgumentMissingError(\n \"You must set or not set --gmsa-dns-server and --gmsa-root-domain-name at the same time.\"\n )\n else:\n if any([gmsa_dns_server, gmsa_root_domain_name]):\n raise RequiredArgumentMissingError(\n \"You only can set --gmsa-dns-server and --gmsa-root-domain-name \"\n \"when setting --enable-windows-gmsa.\"\n )",
"def dns_server(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_server\")",
"def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_windows_gmsa = self.raw_param.get(\"enable_windows_gmsa\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.enabled is not None\n ):\n enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n (\n gmsa_dns_server,\n gmsa_root_domain_name,\n ) = self._get_gmsa_dns_server_and_root_domain_name(\n enable_validation=False\n )\n self.__validate_gmsa_options(\n enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()\n )\n return enable_windows_gmsa",
"def get_valid_domains():\n msg = ''\n import glob\n validDomains = []\n for f in glob.glob('{}/*'.format(OPT_MANAGER_RESOURCES_PGAAS)):\n try:\n with open(f, \"r\") as fp:\n try:\n tmpdata = json.load(fp)\n if 'pubkey' in tmpdata:\n validDomains.append(os.path.basename(f))\n except: # pylint: disable=bare-except\n pass\n except: # pylint: disable=bare-except\n pass\n if len(validDomains) == 0:\n msg += '\\nNo valid PostgreSQL cluster information was found'\n else:\n msg += '\\nThese are the valid PostgreSQL cluster domains found on this manager:'\n for v in validDomains:\n msg += '\\n\\t\"{}\"'.format(v)\n return msg",
"def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()",
"def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"dns_servers\")",
"def _get_fqdn_subdomain(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n fqdn_subdomain = self.raw_param.get(\"fqdn_subdomain\")\n # try to read the property value corresponding to the parameter from the `mc` object\n # Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is\n # no such attribute.\n if (\n self.mc and\n hasattr(self.mc, \"fqdn_subdomain\") and\n self.mc.fqdn_subdomain is not None\n ):\n fqdn_subdomain = self.mc.fqdn_subdomain\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if fqdn_subdomain:\n if self._get_dns_name_prefix(read_only=True):\n raise MutuallyExclusiveArgumentError(\n \"--dns-name-prefix and --fqdn-subdomain cannot be used at same time\"\n )\n private_dns_zone = self._get_private_dns_zone(enable_validation=False)\n if private_dns_zone:\n if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:\n if not is_valid_resource_id(private_dns_zone):\n raise InvalidArgumentValueError(\n private_dns_zone + \" is not a valid Azure resource ID.\"\n )\n else:\n raise InvalidArgumentValueError(\n \"--fqdn-subdomain should only be used for private cluster with custom private dns zone\"\n )\n return fqdn_subdomain",
"def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))",
"def GetDomainName(self):\n try:\n return self.server.GetDomainName()\n except dbus.DBusException:\n return None",
"def get_dns_server_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/dns-setting/servers\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def _get_dns_name_prefix(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n dns_name_prefix = self.raw_param.get(\"dns_name_prefix\")\n # try to read the property value corresponding to the parameter from the `mc` object\n read_from_mc = False\n if self.mc and self.mc.dns_prefix is not None:\n dns_name_prefix = self.mc.dns_prefix\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return dns_name_prefix\n\n dynamic_completion = False\n # check whether the parameter meet the conditions of dynamic completion\n if not dns_name_prefix and not self._get_fqdn_subdomain(enable_validation=False):\n dynamic_completion = True\n # disable dynamic completion if the value is read from `mc`\n dynamic_completion = dynamic_completion and not read_from_mc\n # In case the user does not specify the parameter and it meets the conditions of automatic completion,\n # necessary information is dynamically completed.\n if dynamic_completion:\n name = self.get_name()\n resource_group_name = self.get_resource_group_name()\n subscription_id = self.get_subscription_id()\n # Use subscription id to provide uniqueness and prevent DNS name clashes\n name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]\n if not name_part[0].isalpha():\n name_part = (str('a') + name_part)[0:10]\n resource_group_part = re.sub(\n '[^A-Za-z0-9-]', '', resource_group_name)[0:16]\n dns_name_prefix = '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])\n\n # validation\n if enable_validation:\n if dns_name_prefix and self._get_fqdn_subdomain(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--dns-name-prefix and --fqdn-subdomain cannot be used at same time\"\n )\n return dns_name_prefix",
"def get_dns_name_prefix(self) -> Union[str, None]:\n return self._get_dns_name_prefix(enable_validation=True)",
"def default_server_info():\n # If not set or purposely set = None, then set default\n if MDC.get('server') is None:\n try:\n server = socket.getfqdn()\n except Exception:\n try:\n server = socket.gethostname()\n except Exception:\n server = ''\n MDC.put('server', server)\n if MDC.get('serverIPAddress') is None:\n try:\n server_ip_address = socket.gethostbyname(MDC.get('server'))\n except Exception:\n server_ip_address = \"\"\n MDC.put('serverIPAddress', server_ip_address)",
"def set_dns(self, pardus_profile):\n\n if pardus_profile.get_name_mode() == \"default\":\n default_nameservers = \";\".join( get_default_nameservers())\n default_nameservers = default_nameservers + \";\" # Make sure addresses end with ';'\n self.ignore_auto_dns = \"true\"\n return str(default_nameservers)\n elif pardus_profile.get_name_mode() == \"custom\":\n name_server = str(pardus_profile.get_name_server())\n name_server = name_server + \";\"\n self.ignore_auto_dns = \"true\"\n return str(name_server)\n else:\n # Nothing done in auto option\n return \"none\"",
"def get_dynamic_dns(self):\n return self.mycam.devicemgmt.GetDynamicDNS()",
"def service_dns_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))",
"def root_domain_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"root_domain_name\")",
"def selftest_function(opts):\n\n domains_list = get_domains_list(opts)\n ldap = LDAPDomains(opts)\n\n state = \"success\"\n reason = \"N/A\"\n domain = \"N/A\"\n conn = \"\"\n\n for domain_name in domains_list:\n try:\n \"\"\"\n If labels are given to the servers in the app.config `domain_name` will start with 'fn_ldap_utilities:' else if\n labels are not given then `domain_name` will equal 'fn_ldap_utilites'.\n If `domain_name` contains ':' then a labels have been given to the servers and `domain` will be set to the label given to the server else\n if `domain_name` does not contain ':' then servers have not been labled and `domain` will be set to `domain_name` which will equal 'fn_ldap_utilities'.\n \"\"\"\n domain = domain_name[domain_name.index(\":\")+1:] if \":\" in domain_name else domain_name\n\n # Instansiate helper (which gets appconfigs from file)\n helper = LDAPUtilitiesHelper(ldap.ldap_domain_name_test(domain, domains_list))\n\n options = opts.get(domain_name, {})\n\n log.info(f\"Verifying app.config values for {str(options.get('ldap_server'))} config section\")\n\n # Instansiate LDAP Server and Connection\n conn = helper.get_ldap_connection()\n\n # Bind to the connection\n log.info(\"Verifying LDAP connection...\")\n conn.bind()\n\n log.info(\"Test was successful\\n\")\n except Exception as err:\n state = \"failure\"\n reason = err\n break\n\n finally:\n # Unbind connection\n if conn:\n conn.unbind()\n\n if state == \"success\":\n return {\"state\": state}\n\n return {\n \"state\": state,\n \"reason\": reason,\n \"domain\": domain\n }",
"def service_dns_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def cluster_dns_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_dns_domain\")",
"def kerberos_domain(self):\n return hookenv.config('kerberos-domain')",
"def GetGlobalDNSAddresses(self):\n return (misc.noneToString(self.dns1), misc.noneToString(self.dns2),\n misc.noneToString(self.dns3))",
"def dns(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns\")",
"def get_dns_details(self):\n self.clear_screen()\n zone_file = input('specify zone file \\n'\n 'default [/var/named/ocp.zones]: ')\n default = '/var/named/ocp.zones'\n zone_file = set_values(zone_file, default)\n cluster_name = input('specify cluster name \\n'\n 'default [ocp]: ')\n default = 'ocp'\n cluster_name = set_values(cluster_name, default)\n logging.info('adding zone_file: {} cluster: {}'.format(zone_file, cluster_name))\n self.inventory_dict['csah']['vars']['default_zone_file'] = zone_file\n self.inventory_dict['csah']['vars']['cluster'] = cluster_name",
"def subject_alt_domains(self):\n\n return self._get_subject_alt('dns_name')",
"def get_server_name(self):\n configured_value = self.charm_config[\"server-name\"]\n if configured_value:\n return configured_value\n else:\n fqdn = socket.getfqdn()\n return fqdn",
"def get_possible_domain(self):\n return self.possible_domain",
"def get_name_servers(self, \n ipv4_gateway_mac: str = '01:23:45:67:89:0a',\n ipv6_gateway_mac: str = '01:23:45:67:89:0b',\n domain: str = 'google.com') -> List[Dict[str, str]]:\n\n # region Clear results list\n ns_servers: List[Dict[str, str]] = list()\n self.results.clear()\n # endregion\n\n # region Start sniffer\n if not self.quiet:\n self.base.print_info('Get NS records of domain: ' + domain + ' ...')\n self._sniff_start(self.your_mac_address, self.your_ipv4_address, self.your_ipv6_address, 53)\n # endregion\n\n # region Send DNS queries\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n\n name_servers_addresses = self.base.get_system_name_servers()\n for name_server_address in name_servers_addresses:\n if self.base.ip_address_validation(name_server_address):\n if self.base.ip_address_in_network(name_server_address, self.your_ipv4_network):\n name_server_mac: str = self.arp_scan.get_mac_address(self.network_interface, name_server_address)\n else:\n name_server_mac: str = ipv4_gateway_mac\n dns_query = self.dns.make_ns_query(ethernet_src_mac=self.your_mac_address,\n ethernet_dst_mac=name_server_mac,\n ip_src=self.your_ipv4_address,\n ip_dst=name_server_address,\n udp_src_port=randint(2049, 65535),\n udp_dst_port=53,\n transaction_id=randint(1, 65535),\n name=domain)\n raw_socket.send(dns_query)\n # endregion\n\n # region Resolve NS servers\n sleep(5)\n self._sniff_stop()\n\n ns_servers_names: List[str] = list()\n ns_servers_addresses: List[str] = list()\n\n for ns_server in self.results:\n ns_servers_names.append(ns_server['NS'])\n\n for ns_server_name in ns_servers_names:\n try:\n ns_server_addresses = gethostbyname_ex(ns_server_name)\n if len(ns_server_addresses) > 0:\n for ns_server_address in ns_server_addresses[2]:\n if ns_server_address not in ns_servers_addresses:\n ns_servers_addresses.append(ns_server_address)\n except herror:\n pass\n\n for ns_server_address in ns_servers_addresses:\n if self.base.ip_address_validation(ns_server_address):\n ns_servers.append({'IPv4 address': ns_server_address,\n 'MAC address': ipv4_gateway_mac})\n if self.base.ipv6_address_validation(ns_server_address):\n ns_servers.append({'IPv6 address': ns_server_address,\n 'MAC address': ipv6_gateway_mac})\n\n return ns_servers\n # endregion"
] |
[
"0.805901",
"0.65277654",
"0.54797804",
"0.5408008",
"0.53743374",
"0.5193089",
"0.5179265",
"0.5078613",
"0.50205576",
"0.50150186",
"0.5001028",
"0.4982368",
"0.49672383",
"0.4952367",
"0.49354014",
"0.49327356",
"0.49095985",
"0.49029994",
"0.48969346",
"0.4885461",
"0.48617208",
"0.48543784",
"0.4852651",
"0.48481712",
"0.48112407",
"0.48105633",
"0.4795814",
"0.47679052",
"0.47657284",
"0.47538513"
] |
0.7887675
|
1
|
Internal function to dynamically obtain the values of service_principal and client_secret according to the context. This function supports the options of enable_validation and read_only; when read_only is enabled, dynamic completion and validation are skipped.
|
def _get_service_principal_and_client_secret(
self, enable_validation: bool = False, read_only: bool = False
) -> Tuple[Union[str, None], Union[str, None]]:
# service_principal
# read the original value passed by the command
service_principal = self.raw_param.get("service_principal")
# try to read the property value corresponding to the parameter from the `mc` object
sp_read_from_mc = False
if (
self.mc and
self.mc.service_principal_profile and
self.mc.service_principal_profile.client_id is not None
):
service_principal = self.mc.service_principal_profile.client_id
sp_read_from_mc = True
# client_secret
# read the original value passed by the command
client_secret = self.raw_param.get("client_secret")
# try to read the property value corresponding to the parameter from the `mc` object
secret_read_from_mc = False
if (
self.mc and
self.mc.service_principal_profile and
self.mc.service_principal_profile.secret is not None
):
client_secret = self.mc.service_principal_profile.secret
secret_read_from_mc = True
# consistent check
if sp_read_from_mc != secret_read_from_mc:
raise CLIInternalError(
"Inconsistent state detected, one of sp and secret is read from the `mc` object."
)
# skip dynamic completion & validation if option read_only is specified
if read_only:
return service_principal, client_secret
# these parameters do not need dynamic completion
# validation
if enable_validation:
# only one of service_principal and client_secret is provided, not both
if (service_principal or client_secret) and not (service_principal and client_secret):
raise RequiredArgumentMissingError(
"Please provide both --service-principal and --client-secret to use sp as the cluster identity. "
"An sp can be created using the 'az ad sp create-for-rbac' command."
)
return service_principal, client_secret
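
The function above follows a pattern that recurs throughout this file: prefer the value already stored on the managed-cluster (`mc`) object, fall back to the raw command parameter, and fail fast when two paired values come from different sources. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, and the names FakeProfile, FakeCluster and resolve_sp_and_secret are made up for the example, not part of the code above.

# A minimal sketch (hypothetical names) of the read-with-fallback pattern:
# the cluster object, when present, wins over the raw command parameters,
# and paired values must come from the same source.
from typing import Optional, Tuple


class FakeProfile:
    def __init__(self, client_id: Optional[str], secret: Optional[str]):
        self.client_id = client_id
        self.secret = secret


class FakeCluster:
    def __init__(self, profile: Optional[FakeProfile]):
        self.service_principal_profile = profile


def resolve_sp_and_secret(
    raw_param: dict, mc: Optional[FakeCluster]
) -> Tuple[Optional[str], Optional[str]]:
    # read the raw parameter, then override from the cluster object if set
    sp = raw_param.get("service_principal")
    sp_from_mc = False
    if mc and mc.service_principal_profile and mc.service_principal_profile.client_id is not None:
        sp = mc.service_principal_profile.client_id
        sp_from_mc = True

    secret = raw_param.get("client_secret")
    secret_from_mc = False
    if mc and mc.service_principal_profile and mc.service_principal_profile.secret is not None:
        secret = mc.service_principal_profile.secret
        secret_from_mc = True

    # consistency check: both values must come from the same source
    if sp_from_mc != secret_from_mc:
        raise RuntimeError("Inconsistent state: only one of sp/secret was read from the cluster object.")
    return sp, secret


# Usage: raw parameters apply when no cluster object exists; otherwise the
# values stored on the cluster object take precedence.
print(resolve_sp_and_secret({"service_principal": "sp-1", "client_secret": "s-1"}, None))
print(resolve_sp_and_secret({}, FakeCluster(FakeProfile("sp-2", "s-2"))))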
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_service_principal_and_client_secret(\n self\n ) -> Tuple[Union[str, None], Union[str, None]]:\n return self._get_service_principal_and_client_secret(enable_validation=True)",
"def _obtain_service_account_creds(self) -> service_account.Credentials:\n credentials_json = self._raw_credentials.get(\"credentials_json\")\n admin_email = self._raw_credentials.get(\"email\")\n account_info = self._load_account_info(credentials_json)\n creds = service_account.Credentials.from_service_account_info(account_info, scopes=SCOPES)\n self._creds = creds.with_subject(admin_email)",
"def GenerateConfig(context):\n\n resources = [\n {\n 'name': 'auth_cloud_sql_client_to_cloud_sql_proxy_sa',\n 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',\n 'properties': {\n 'resource': context.env['project'],\n 'role': 'roles/cloudsql.client',\n 'member': 'serviceAccount:$(ref.cloud-sql-proxy-service-acc.email)'\n },\n }\n ]\n return {'resources': resources}",
"def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n principal_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'servicePrincipalSecret')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"secret\", secret)",
"def get_api_credentials(scope, service_account=True):\n\tSTORAGE = file.Storage('oAuth2.json') #local storage of oAuth tokens\n\tcredentials = STORAGE.get()\n\tif credentials is None or credentials.invalid: #check if new oAuth flow is needed\n\t\tif service_account: #server 2 server flow\n##\t\t\twith open(SERVICE_ACCOUNT_FILE) as f:\n##\t\t\t\taccount = json.loads(f.read())\n##\t\t\t\temail = account['client_email']\n##\t\t\t\tkey = account['private_key']\n\t\t\tcredentials = ServiceAccountCredentials.from_json_keyfile_name(SERVICE_ACCOUNT_FILE, scope)\n##\t\t\tcredentials = client.SignedJwtAssertionCredentials(email, key, scope=scope)\n\t\t\tSTORAGE.put(credentials)\n\t\telse: #Application Default Credentials (ADC)\n\t\t\tcredentials = GoogleCredentials.get_application_default()\n\t\t\treturn discovery.build('vision', 'v1', credentials=credentials,\n discoveryServiceUrl=DISCOVERY_URL)\t \n##\t\telse: #normal oAuth2 flow\n##\t\t\tCLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')\n##\t\t\tFLOW = client.flow_from_clientsecrets(CLIENT_SECRETS, scope=scope)\n##\t\t\tPARSER = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser])\n##\t\t\tFLAGS = PARSER.parse_args(sys.argv[1:])\n##\t\t\tcredentials = tools.run_flow(FLOW, STORAGE, FLAGS)\n\t\t\n\treturn credentials",
"def getCustosCredentialFromClientId(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _get_client_id(self, context):\n for key, value in context.invocation_metadata():\n if key == 'client_id':\n return value\n raise Exception('client id not found')",
"def get_oauth_data():",
"def _load_credentials(self, datasource):\n\n self.credentials = datasource.credentials # Access the credentials\n\n # If there are credentials then make the api call\n if self.credentials:\n self.credentials = yaml.load(self.credentials)\n if self._validate_credentials():\n return self.credentials[\"client_id\"], self.credentials[\"client_secret\"]\n\n raise InvalidOrMissingCredentials(\"client_id and client_secret are missing or invalid\")",
"def get_appengine_credentials():\n return get_credentials()",
"def make_oauth_configration_resources_dict():\n config = get_user_config()\n return {\n 'config': config.to_dict(),\n 'oauth_url': oauth.getOauthFlow().step1_get_authorize_url(),\n }",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def get_credentials():\n return ServiceAccountCredentials.from_json_keyfile_dict(SERVICE_ACCOUNT, scopes = SCOPES)",
"def _add_cred_variables(self):\n self.credentialKey = {}\n authInfo = None\n if self.client:\n try:\n authInfo = self.client.getAuthenticatorInfo()\n except VersionMethodError:\n pass\n authArgOpts = dict(help=\"authentication plugin\")\n if authInfo:\n self.authenticatorInfo = AuthenticatorInfo(authInfo)\n authArgOpts['choices'] = self.authenticatorInfo.getAuthNames()\n else:\n self.authenticatorInfo = LegacyAuthenticatorInfo()\n\n var = self.add_variable('auth', (\"-a\", \"--auth\"), authArgOpts,\n envvar='ICAT_AUTH')\n var.postprocess = _post_auth\n for key in self.authenticatorInfo.getCredentialKeys(hide=False):\n self._add_credential_key(key)\n hidden = self.authenticatorInfo.getCredentialKeys(hide=True)\n if hidden:\n var = self.add_variable('promptPass', (\"-P\", \"--prompt-pass\"), \n dict(help=\"prompt for the password\", \n action='store_const', const=True), \n type=boolean, default=False)\n var.postprocess = _post_promptPass\n for key in hidden:\n self._add_credential_key(key, hide=True)",
"def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_client_credentials_intractive(self, client_id, client_secret, persist=False):\n if type(client_id) == unicode:\n client_id = client_id.encode('ascii')\n if type(client_secret) == unicode:\n client_secret = client_secret.encode('ascii')\n \n flow = OAuth2WebServerFlow(client_id, client_secret, self._OAUTH_SCOPE, \n redirect_uri=self._REDIRECT_URI)\n authorize_url = flow.step1_get_authorize_url()\n print 'Go to the following link in your browser: ' + authorize_url\n code = raw_input('Enter verification code: ').strip()\n credentials = flow.step2_exchange(code)\n \n if persist:\n self.store_client_credentials(client_id, credentials)\n \n return credentials",
"def servicenow_sspm_wsdl_request_enforce_basic_auth_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.basicauth.required.wsdl\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"true\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.24] Instance should enforce basic authentication for WSDL requests\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not enforce basic authentication for WSDL requests. Use the 'glide.basicauth.required.wsdl' property to designate if incoming WSDL (Web Services Description Language) requests should require basic authentication. Note: If you choose not to require basic authentication for incoming WSDL requests, you must modify Access Control (ACL) rules to enable guest users to access the WSDL content. Without appropriate authorization configured on the WSDL web services, an unauthorized user can get access to sensitive WSDL content/data on the target instance. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the WSDL request authorization (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/wsdl-request-authorization.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.24] Instance should enforce basic authentication for WSDL requests\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does enforce basic authentication for WSDL requests.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the WSDL request authorization (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/wsdl-request-authorization.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": 
f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding",
"def _get_credentials(flags):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-visualizerhelptext.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials",
"def get_credentials(service, sandbox=True):\n srv = service.lower()\n srv_param = resolve_service(srv)\n if srv_param is None:\n return\n\n client_id, client_secret, scope, storage = srv_param\n if srv == 'evernote':\n return evernote_auth(client_id, client_secret, storage, sandbox)\n else:\n return google_auth(client_id, client_secret, scope, storage)",
"def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_integrations_credentials(self, **kwargs):\n\n all_params = ['page_number', 'page_size']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_credentials\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/credentials'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CredentialInfoListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def generate_client_credentials(confidential):\n client_id = random_str(40)\n client_secret = None\n hashed_secret = None\n if confidential:\n client_secret = random_str(55)\n hashed_secret = bcrypt.hashpw(\n client_secret.encode(\"utf-8\"), bcrypt.gensalt()\n ).decode(\"utf-8\")\n return client_id, client_secret, hashed_secret",
"def get_credentials(client_secrets='client_secrets.json',\n scope_='https://www.googleapis.com/auth/drive',\n redirect_uri_='http://localhost:8080'):\n flow = client.flow_from_clientsecrets(client_secrets,\n scope=scope_,\n redirect_uri=redirect_uri_)\n credentials = tools.run_flow(flow, Store(), None)\n return credentials",
"def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n # get aad profile from `mc`\n aad_profile = None\n if self.mc:\n aad_profile = self.mc.aad_profile\n\n # read the original value passed by the command\n aad_client_app_id = self.raw_param.get(\"aad_client_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.client_app_id is not None:\n aad_client_app_id = aad_profile.client_app_id\n\n # read the original value passed by the command\n aad_server_app_id = self.raw_param.get(\"aad_server_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_id is not None:\n aad_server_app_id = aad_profile.server_app_id\n\n # read the original value passed by the command\n aad_server_app_secret = self.raw_param.get(\"aad_server_app_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_secret is not None:\n aad_server_app_secret = aad_profile.server_app_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n enable_aad = self._get_enable_aad(enable_validation=False)\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n return aad_client_app_id, aad_server_app_id, aad_server_app_secret",
"def _get_client_details():\n with open(CLIENT_DETAILS_FILE) as f:\n client_details = json.load(f)\n client_id = client_details['client_id']\n client_secret = client_details['client_secret']\n\n return client_id, client_secret",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def credentials(self) -> Mapping:",
"def auth(secrets: Dict) -> ClientSecretCredential:\n\n try:\n credential = ClientSecretCredential(\n tenant_id=secrets.get('tenant_id'),\n client_id=secrets.get('client_id'),\n client_secret=secrets.get('client_secret'),\n authority=urlparse(secrets.get('cloud').endpoints.active_directory).hostname\n )\n except ValueError as e:\n raise InterruptExecution(str(e))\n yield credential"
] |
[
"0.66582423",
"0.565604",
"0.5578831",
"0.54712915",
"0.53976566",
"0.5392976",
"0.53325087",
"0.53316975",
"0.53104705",
"0.5297007",
"0.52847713",
"0.5268223",
"0.52422273",
"0.52220094",
"0.5221576",
"0.5221311",
"0.5186103",
"0.5183048",
"0.517358",
"0.5161288",
"0.51548105",
"0.5147325",
"0.51458585",
"0.51260865",
"0.51222426",
"0.50948626",
"0.50909543",
"0.50909543",
"0.50610644",
"0.50556475"
] |
0.7199387
|
0
|
Dynamically obtain the values of service_principal and client_secret according to the context.
|
def get_service_principal_and_client_secret(
self
) -> Tuple[Union[str, None], Union[str, None]]:
return self._get_service_principal_and_client_secret(enable_validation=True)
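
The public getter above simply re-enters the internal variant with validation switched on. A hedged sketch of that public/internal pairing follows; ParamContext and get_flag are illustrative names only, not part of the source above.

# Sketch of the public/internal getter pairing: the internal _get_* variant
# exposes enable_validation (and similar switches) for callers that need to
# avoid re-validation, while the public get_* variant always validates.
class ParamContext:
    def __init__(self, raw_param: dict):
        self.raw_param = raw_param

    def _get_flag(self, enable_validation: bool = False) -> bool:
        flag = bool(self.raw_param.get("flag"))
        if enable_validation and flag and not self.raw_param.get("dependency"):
            raise ValueError("--flag requires --dependency")
        return flag

    def get_flag(self) -> bool:
        # Public entry point: validation is always on.
        return self._get_flag(enable_validation=True)


print(ParamContext({"flag": True, "dependency": True}).get_flag())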
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_client_id(self, context):\n for key, value in context.invocation_metadata():\n if key == 'client_id':\n return value\n raise Exception('client id not found')",
"def _get_service_principal_and_client_secret(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Tuple[Union[str, None], Union[str, None]]:\n # service_principal\n # read the original value passed by the command\n service_principal = self.raw_param.get(\"service_principal\")\n # try to read the property value corresponding to the parameter from the `mc` object\n sp_read_from_mc = False\n if (\n self.mc and\n self.mc.service_principal_profile and\n self.mc.service_principal_profile.client_id is not None\n ):\n service_principal = self.mc.service_principal_profile.client_id\n sp_read_from_mc = True\n\n # client_secret\n # read the original value passed by the command\n client_secret = self.raw_param.get(\"client_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n secret_read_from_mc = False\n if (\n self.mc and\n self.mc.service_principal_profile and\n self.mc.service_principal_profile.secret is not None\n ):\n client_secret = self.mc.service_principal_profile.secret\n secret_read_from_mc = True\n\n # consistent check\n if sp_read_from_mc != secret_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of sp and secret is read from the `mc` object.\"\n )\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return service_principal, client_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n # only one of service_principal and client_secret is provided, not both\n if (service_principal or client_secret) and not (service_principal and client_secret):\n raise RequiredArgumentMissingError(\n \"Please provide both --service-principal and --client-secret to use sp as the cluster identity. \"\n \"An sp can be created using the 'az ad sp create-for-rbac' command.\"\n )\n return service_principal, client_secret",
"def GenerateConfig(context):\n\n resources = [\n {\n 'name': 'auth_cloud_sql_client_to_cloud_sql_proxy_sa',\n 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',\n 'properties': {\n 'resource': context.env['project'],\n 'role': 'roles/cloudsql.client',\n 'member': 'serviceAccount:$(ref.cloud-sql-proxy-service-acc.email)'\n },\n }\n ]\n return {'resources': resources}",
"def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n client_id: pulumi.Input[str],\n principal_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'servicePrincipalSecret')\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"principal_id\", principal_id)\n pulumi.set(__self__, \"secret\", secret)",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def _obtain_service_account_creds(self) -> service_account.Credentials:\n credentials_json = self._raw_credentials.get(\"credentials_json\")\n admin_email = self._raw_credentials.get(\"email\")\n account_info = self._load_account_info(credentials_json)\n creds = service_account.Credentials.from_service_account_info(account_info, scopes=SCOPES)\n self._creds = creds.with_subject(admin_email)",
"def getCustosCredentialFromClientId(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_appengine_credentials():\n return get_credentials()",
"def get_oauth_data():",
"def generate_client_credentials(confidential):\n client_id = random_str(40)\n client_secret = None\n hashed_secret = None\n if confidential:\n client_secret = random_str(55)\n hashed_secret = bcrypt.hashpw(\n client_secret.encode(\"utf-8\"), bcrypt.gensalt()\n ).decode(\"utf-8\")\n return client_id, client_secret, hashed_secret",
"def extract_user_info(client_config):\n # test if there isn't a system user or if there isn't a name for that\n # user, return None\n if ('system user' not in client_config or\n 'name' not in client_config['system user']):\n return None\n\n user_info = dict()\n user_info['system_key'] = dict(\n user=client_config['system user']['name'],\n access_key=client_config['system user']['access key'],\n secret_key=client_config['system user']['secret key'],\n )\n return user_info",
"def client_secret(self) -> str:",
"def auth(secrets: Dict) -> ClientSecretCredential:\n\n try:\n credential = ClientSecretCredential(\n tenant_id=secrets.get('tenant_id'),\n client_id=secrets.get('client_id'),\n client_secret=secrets.get('client_secret'),\n authority=urlparse(secrets.get('cloud').endpoints.active_directory).hostname\n )\n except ValueError as e:\n raise InterruptExecution(str(e))\n yield credential",
"def _get_client_details():\n with open(CLIENT_DETAILS_FILE) as f:\n client_details = json.load(f)\n client_id = client_details['client_id']\n client_secret = client_details['client_secret']\n\n return client_id, client_secret",
"def get_credentials(service, sandbox=True):\n srv = service.lower()\n srv_param = resolve_service(srv)\n if srv_param is None:\n return\n\n client_id, client_secret, scope, storage = srv_param\n if srv == 'evernote':\n return evernote_auth(client_id, client_secret, storage, sandbox)\n else:\n return google_auth(client_id, client_secret, scope, storage)",
"def client_secret(self) -> str:\n return self.get_env_var(self.client_secret_var)",
"def client_secret(self) -> str:\n return self.get_env_var(self.client_secret_var)",
"def get_auth(context):\n\n headers = context['headers']\n auth_info = {\n \"type\": \"basic\",\n \"basic\": {\n \"user\": headers['api_key'],\n \"password\": \"X\"\n }\n }\n auth = Auth().get_auth(auth_info)\n\n return auth",
"def get_credentials():\n return ServiceAccountCredentials.from_json_keyfile_dict(SERVICE_ACCOUNT, scopes = SCOPES)",
"def _load_credentials(self, datasource):\n\n self.credentials = datasource.credentials # Access the credentials\n\n # If there are credentials then make the api call\n if self.credentials:\n self.credentials = yaml.load(self.credentials)\n if self._validate_credentials():\n return self.credentials[\"client_id\"], self.credentials[\"client_secret\"]\n\n raise InvalidOrMissingCredentials(\"client_id and client_secret are missing or invalid\")",
"def get_context():\n context = {}\n cfg = load_service_config(\"lighttpd\")\n ip = \"127.0.0.1\"\n enable_caching = False\n try:\n mconfig = load_service_mconfig_as_json('lighttpd')\n enable_caching = mconfig.enable_caching\n except LoadConfigError:\n logging.info(\"Using default values for service 'lighttpd'\")\n\n if enable_caching:\n ip = get_ip_from_if(cfg['interface'])\n\n context['interface_ip'] = ip\n context['store_root'] = cfg['store_root']\n\n return context",
"def _get_client_info():\n if hasattr(request.authorization, 'username'):\n auth_user = request.authorization.username\n else:\n auth_user = 'Unknown'\n info = request.headers\n origin_string = info.get(\"User-Agent\", \"\")\n origin_props = {}\n if origin_string:\n try:\n origin_props = dict(\n [_.split(\"/\", 1) for _ in origin_string.split()]\n )\n except ValueError:\n pass\n prog_name = origin_props.get(\"prog_name\", \"Unknown\")\n uuid = origin_props.get(\"uuid\", uuid4())\n host = info.get(\"Host\", \"Unknown\")\n if info.get(\"From\") and \"@\" in info[\"From\"]:\n user = info[\"From\"].split(\"@\")[0]\n else:\n user = (\"Unknown\")\n return auth_user, prog_name, user, host, uuid",
"def get_credentials(env=\"development\") -> dict:\n load_dotenv()\n credentials = {}\n\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"DEV_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"DEV_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"DEV_AWS_REGION\")\n\n if env == \"production\":\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"PROD_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"PROD_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"PROD_AWS_REGION\")\n\n return credentials",
"def make_oauth_configration_resources_dict():\n config = get_user_config()\n return {\n 'config': config.to_dict(),\n 'oauth_url': oauth.getOauthFlow().step1_get_authorize_url(),\n }",
"def credentials(self) -> Mapping:",
"def _obtain_web_app_creds(self) -> Credentials:\n info = {\n \"client_id\": self._raw_credentials.get(\"client_id\"),\n \"client_secret\": self._raw_credentials.get(\"client_secret\"),\n \"refresh_token\": self._raw_credentials.get(\"refresh_token\"),\n }\n creds = Credentials.from_authorized_user_info(info)\n if creds.expired:\n creds.refresh(Request())\n self._creds = creds",
"def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }",
"def from_service_credentials(*,credentials,region):\n # read and check format of service credentials\n if not isinstance(credentials, dict):\n raise Exception(\"service creds must be a dict\")\n for k in ['apikey', 'endpoints', 'resource_instance_id']:\n if not k in credentials:\n raise Exception(\"missing key: \" + k)\n logging.debug(credentials)\n \n # read endpoints from url in service credentials\n response = requests.get(credentials['endpoints'])\n if response.status_code != 200:\n raise Exception(\"error\")\n \n # return parameters\n return {\n 'endpoint': \"https://\" + response.json()['service-endpoints']['regional'][region]['public'][region], \n 'key': credentials['apikey'],\n 'crn': credentials['resource_instance_id']\n }",
"def get_settings_from_client(client):\r\n settings = {\r\n 'username': '',\r\n 'api_key': '',\r\n 'timeout': client.timeout or '',\r\n 'endpoint_url': client.endpoint_url,\r\n }\r\n try:\r\n settings['username'] = client.auth.username\r\n settings['api_key'] = client.auth.api_key\r\n except AttributeError:\r\n pass\r\n\r\n return settings",
"def get_secrets(request):\n secret_keys = (\n 'neptune_sql_credentials',\n 'triton_sql_credentials',\n 'saturn_sql_credentials',\n 'qualtrics_credentials',\n 'rserve_service_account_credentials',\n )\n secrets = {s: json.loads(SecretValue.get(s, 'null'))\n for s in secret_keys}\n\n # Add the mandrill api key, which isn't a JSON string.\n if request.get('send_email', None) != 'false':\n secrets['mandrill_api_key'] = SecretValue.get(\n 'mandrill_api_key', '')\n\n return secrets"
] |
[
"0.60935384",
"0.60134804",
"0.59524554",
"0.59348845",
"0.5754955",
"0.56976146",
"0.56620353",
"0.5611669",
"0.55893457",
"0.557075",
"0.5533861",
"0.5498766",
"0.54349977",
"0.53963697",
"0.53937066",
"0.53483903",
"0.53483903",
"0.53441286",
"0.53438115",
"0.53174794",
"0.5301437",
"0.5294366",
"0.5289653",
"0.52671134",
"0.5265235",
"0.5252818",
"0.52363235",
"0.5227382",
"0.5220768",
"0.52181035"
] |
0.6777169
|
0
|
Obtain the value of skip_subnet_role_assignment.
|
def get_skip_subnet_role_assignment(self) -> bool:
# read the original value passed by the command
skip_subnet_role_assignment = self.raw_param.get("skip_subnet_role_assignment")
# this parameter does not need dynamic completion
# this parameter does not need validation
return skip_subnet_role_assignment
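
For contrast with the validated getters elsewhere in this file, this is the degenerate case: a plain passthrough read with no completion and no validation. A tiny illustrative sketch follows; the default of False for a missing key is an assumption added here, since the original simply returns whatever raw_param holds.

# Minimal sketch (hypothetical standalone form) of a passthrough parameter read.
def get_skip_subnet_role_assignment(raw_param: dict) -> bool:
    # read the original value passed by the command; no dynamic completion,
    # no validation (assumed default: False when the key is absent)
    return bool(raw_param.get("skip_subnet_role_assignment", False))


print(get_skip_subnet_role_assignment({"skip_subnet_role_assignment": True}))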
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")",
"def role_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role_id\")",
"def execution_role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"execution_role_arn\")",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def get_role(self):\n return self.role",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")",
"def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")",
"def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")",
"def _get_role(self):\n return self.__role",
"def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')",
"def get_task_role(self):\n if self.default_task_role is None and self.delegate is None:\n raise ConfigException(\"No default task role defined on the config model\")\n\n if self.namespace_model_instance is None:\n raise ConfigException(\"ConfigModel instance can't get a default task role from a Namespace model reference without an instance of that model\")\n \n comp_ref = self.namespace_model_instance.get_inst_ref(self.default_task_role)\n comp_ref.fix_arguments()\n return comp_ref.value()",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self) -> str:\n return pulumi.get(self, \"role\")",
"def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")",
"def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")",
"def delivery_channel_assume_role_arn(self) -> str:\n return pulumi.get(self, \"delivery_channel_assume_role_arn\")"
] |
[
"0.62121415",
"0.62121415",
"0.6066156",
"0.5905377",
"0.5905377",
"0.5905377",
"0.5905377",
"0.57601845",
"0.5713269",
"0.56733364",
"0.5670082",
"0.56682163",
"0.5652695",
"0.5652695",
"0.5652695",
"0.5652012",
"0.561779",
"0.561779",
"0.5596034",
"0.55703914",
"0.5553662",
"0.5537925",
"0.55300605",
"0.5524663",
"0.5524663",
"0.5524663",
"0.54924256",
"0.5474339",
"0.5428886",
"0.5428886"
] |
0.8256094
|
0
|
Internal function to obtain the value of assign_identity. This function supports the option of enable_validation. When enabled, a RequiredArgumentMissingError will be raised if assign_identity is assigned but enable_managed_identity is not specified, and also if assign_identity is not assigned but assign_kubelet_identity is.
|
def _get_assign_identity(self, enable_validation: bool = False) -> Union[str, None]:
# read the original value passed by the command
assign_identity = self.raw_param.get("assign_identity")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.identity and
self.mc.identity.user_assigned_identities is not None
):
value_obtained_from_mc = safe_list_get(
list(self.mc.identity.user_assigned_identities.keys()), 0, None
)
if value_obtained_from_mc is not None:
assign_identity = value_obtained_from_mc
# this parameter does not need dynamic completion
# validation
if enable_validation:
if assign_identity:
if not self._get_enable_managed_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-identity can only be specified when --enable-managed-identity is specified"
)
else:
if self.decorator_mode == DecoratorMode.CREATE:
if self._get_assign_kubelet_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-kubelet-identity can only be specified when --assign-identity is specified"
)
return assign_identity
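
The validation branch above encodes a two-link dependency chain between related flags. A minimal sketch of just that chain, using a hypothetical standalone name (validate_identity_params is not a function in the source):

# assign_identity requires enable_managed_identity, and assign_kubelet_identity
# in turn requires assign_identity.
from typing import Optional


def validate_identity_params(enable_managed_identity: bool,
                             assign_identity: Optional[str],
                             assign_kubelet_identity: Optional[str]) -> None:
    # assign_identity is only meaningful on a managed-identity cluster ...
    if assign_identity and not enable_managed_identity:
        raise ValueError("--assign-identity can only be specified when --enable-managed-identity is specified")
    # ... and assign_kubelet_identity in turn requires assign_identity.
    if assign_kubelet_identity and not assign_identity:
        raise ValueError("--assign-kubelet-identity can only be specified when --assign-identity is specified")


validate_identity_params(True, "id-1", None)  # passes silently
try:
    validate_identity_params(False, "id-1", None)
except ValueError as err:
    print(err)  # first link of the chain fires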
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_assign_kubelet_identity(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n assign_kubelet_identity = self.raw_param.get(\"assign_kubelet_identity\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.identity_profile and\n self.mc.identity_profile.get(\"kubeletidentity\", None) and\n getattr(self.mc.identity_profile.get(\"kubeletidentity\"), \"resource_id\") is not None\n ):\n assign_kubelet_identity = getattr(self.mc.identity_profile.get(\"kubeletidentity\"), \"resource_id\")\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if assign_kubelet_identity:\n if self.decorator_mode == DecoratorMode.CREATE and not self._get_assign_identity(\n enable_validation=False\n ):\n raise RequiredArgumentMissingError(\n \"--assign-kubelet-identity can only be specified when --assign-identity is specified\"\n )\n if self.decorator_mode == DecoratorMode.UPDATE:\n msg = (\n \"You're going to update kubelet identity to {}, \"\n \"which will upgrade every node pool in the cluster \"\n \"and might take a while, do you wish to continue?\".format(assign_kubelet_identity)\n )\n if not self.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n if not self.get_assign_identity() and not self.get_user_assignd_identity_from_mc():\n raise RequiredArgumentMissingError(\n \"--assign-identity is not provided and the cluster identity type \"\n \"is not user assigned, cannot update kubelet identity\"\n )\n return assign_kubelet_identity",
"def get_assign_identity(self) -> Union[str, None]:\n\n return self._get_assign_identity(enable_validation=True)",
"def get_assign_kubelet_identity(self) -> Union[str, None]:\n return self._get_assign_kubelet_identity(enable_validation=True)",
"def _get_enable_managed_identity(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_managed_identity = self.raw_param.get(\"enable_managed_identity\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.identity:\n enable_managed_identity = check_is_msi_cluster(self.mc)\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return enable_managed_identity\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n service_principal,\n client_secret,\n ) = self._get_service_principal_and_client_secret(read_only=True)\n if not read_from_mc and service_principal and client_secret:\n enable_managed_identity = False\n\n # validation\n if enable_validation:\n if not enable_managed_identity and self._get_assign_identity(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--assign-identity can only be specified when --enable-managed-identity is specified\"\n )\n return enable_managed_identity",
"def identity(self) -> pulumi.Input['UserAssignedIdentityArgs']:\n return pulumi.get(self, \"identity\")",
"def get_user_assignd_identity_from_mc(self) -> Union[str, None]:\n user_assigned_identity = None\n if self.mc and self.mc.identity and self.mc.identity.user_assigned_identities:\n user_assigned_identity = safe_list_get(list(self.mc.identity.user_assigned_identities.keys()), 0, None)\n return user_assigned_identity",
"def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:\n return self.external_functions.get_user_assigned_identity_by_resource_id(self.cmd.cli_ctx, assigned_identity)",
"def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity = None\n enable_managed_identity = self.context.get_enable_managed_identity()\n assign_identity = self.context.get_assign_identity()\n if enable_managed_identity and not assign_identity:\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif enable_managed_identity and assign_identity:\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def get_enable_managed_identity(self) -> bool:\n return self._get_enable_managed_identity(enable_validation=True)",
"def get_assignee_from_identity_or_sp_profile(self) -> Tuple[str, bool]:\n assignee = None\n is_service_principal = False\n if check_is_msi_cluster(self.mc):\n if self.mc.identity_profile is None or self.mc.identity_profile[\"kubeletidentity\"] is None:\n raise UnknownError(\n \"Unexpected error getting kubelet's identity for the cluster. \"\n \"Please do not set --attach-acr or --detach-acr. \"\n \"You can manually grant or revoke permission to the identity named \"\n \"<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.\"\n )\n assignee = self.mc.identity_profile[\"kubeletidentity\"].object_id\n is_service_principal = False\n elif self.mc and self.mc.service_principal_profile is not None:\n assignee = self.mc.service_principal_profile.client_id\n is_service_principal = True\n\n if not assignee:\n raise UnknownError('Cannot get the AKS cluster\\'s service principal.')\n return assignee, is_service_principal",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def get_user_assigned_identity_object_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).principal_id",
"def primary_user_assigned_identity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def identity(self) -> Optional[pulumi.Input['IdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def get_workload_identity_profile(self) -> Optional[ManagedClusterSecurityProfileWorkloadIdentity]:\n enable_workload_identity = self.raw_param.get(\"enable_workload_identity\")\n disable_workload_identity = self.raw_param.get(\"disable_workload_identity\")\n\n if not enable_workload_identity and not disable_workload_identity:\n return None\n\n if enable_workload_identity and disable_workload_identity:\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-workload-identity and \"\n \"--disable-workload-identity at the same time.\"\n )\n\n if not hasattr(self.models, \"ManagedClusterSecurityProfileWorkloadIdentity\"):\n return None\n\n profile = self.models.ManagedClusterSecurityProfileWorkloadIdentity()\n\n if self.decorator_mode == DecoratorMode.CREATE:\n profile.enabled = bool(enable_workload_identity)\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n hasattr(self.mc, \"security_profile\") and\n self.mc.security_profile is not None and\n self.mc.security_profile.workload_identity is not None\n ):\n # reuse previous profile is has been set\n profile = self.mc.security_profile.workload_identity\n\n if enable_workload_identity:\n profile.enabled = True\n elif disable_workload_identity:\n profile.enabled = False\n\n if profile.enabled:\n # in enable case, we need to check if OIDC issuer has been enabled\n oidc_issuer_profile = self.get_oidc_issuer_profile()\n if self.decorator_mode == DecoratorMode.UPDATE and oidc_issuer_profile is None:\n # if the cluster has enabled OIDC issuer before, in update call:\n #\n # az aks update --enable-workload-identity\n #\n # we need to use previous OIDC issuer profile\n oidc_issuer_profile = self.mc.oidc_issuer_profile\n oidc_issuer_enabled = oidc_issuer_profile is not None and oidc_issuer_profile.enabled\n if not oidc_issuer_enabled:\n raise RequiredArgumentMissingError(\n \"Enabling workload identity requires enabling OIDC issuer (--enable-oidc-issuer).\"\n )\n\n return profile",
"def identity(self) -> Optional[pulumi.Input['IdentityInfoArgs']]:\n return pulumi.get(self, \"identity\")",
"def __init__(__self__, *,\n identity_type: Optional[pulumi.Input[Union[str, 'CmkIdentityType']]] = None,\n user_assigned_identity_resource_id: Optional[pulumi.Input[str]] = None):\n if identity_type is not None:\n pulumi.set(__self__, \"identity_type\", identity_type)\n if user_assigned_identity_resource_id is not None:\n pulumi.set(__self__, \"user_assigned_identity_resource_id\", user_assigned_identity_resource_id)",
"def get_user_assigned_identity_client_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).client_id",
"def identity(self) -> Optional[pulumi.Input['ClusterIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def identity_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_id\")",
"def identity_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_id\")",
"def identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional['outputs.IdentityPropertiesResponse']:\n return pulumi.get(self, \"identity\")",
"def get_assign(self):\n return self.assign",
"def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n identity_profile = None\n assign_kubelet_identity = self.context.get_assign_kubelet_identity()\n if assign_kubelet_identity:\n kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)\n identity_profile = {\n 'kubeletidentity': self.models.UserAssignedIdentity(\n resource_id=assign_kubelet_identity,\n client_id=kubelet_identity.client_id, # TODO: may remove, rp would take care of this\n object_id=kubelet_identity.principal_id # TODO: may remove, rp would take care of this\n )\n }\n cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()\n # ensure the cluster identity has \"Managed Identity Operator\" role at the scope of kubelet identity\n self.context.external_functions.ensure_cluster_identity_permission_on_kubelet_identity(\n self.cmd,\n cluster_identity_object_id,\n assign_kubelet_identity)\n mc.identity_profile = identity_profile\n return mc",
"def update_identity(self, mc: ManagedCluster) -> ManagedCluster:\n self._ensure_mc(mc)\n\n current_identity_type = \"spn\"\n current_user_assigned_identity = \"\"\n if mc.identity is not None:\n current_identity_type = mc.identity.type.casefold()\n if mc.identity.user_assigned_identities is not None and len(mc.identity.user_assigned_identities) > 0:\n current_user_assigned_identity = list(mc.identity.user_assigned_identities.keys())[0]\n\n goal_identity_type = current_identity_type\n assign_identity = self.context.get_assign_identity()\n if self.context.get_enable_managed_identity():\n if not assign_identity:\n goal_identity_type = \"systemassigned\"\n else:\n goal_identity_type = \"userassigned\"\n\n is_update_identity = ((current_identity_type != goal_identity_type) or\n (current_identity_type == goal_identity_type and\n current_identity_type == \"userassigned\" and\n assign_identity is not None and\n current_user_assigned_identity != assign_identity))\n if is_update_identity:\n if current_identity_type == \"spn\":\n msg = (\n \"Your cluster is using service principal, and you are going to update \"\n \"the cluster to use {} managed identity.\\nAfter updating, your \"\n \"cluster's control plane and addon pods will switch to use managed \"\n \"identity, but kubelet will KEEP USING SERVICE PRINCIPAL \"\n \"until you upgrade your agentpool.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(goal_identity_type)\n elif current_identity_type != goal_identity_type:\n msg = (\n \"Your cluster is already using {} managed identity, and you are going to \"\n \"update the cluster to use {} managed identity.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_identity_type, goal_identity_type)\n else:\n msg = (\n \"Your cluster is already using userassigned managed identity, current control plane identity is {},\"\n \"and you are going to update the cluster identity to {}.\\n\"\n \"Are you sure you want to perform this operation?\"\n ).format(current_user_assigned_identity, assign_identity)\n # gracefully exit if user does not confirm\n if not self.context.get_yes() and not prompt_y_n(msg, default=\"n\"):\n raise DecoratorEarlyExitException\n # update identity\n if goal_identity_type == \"systemassigned\":\n identity = self.models.ManagedClusterIdentity(\n type=\"SystemAssigned\"\n )\n elif goal_identity_type == \"userassigned\":\n user_assigned_identity = {\n assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()\n }\n identity = self.models.ManagedClusterIdentity(\n type=\"UserAssigned\",\n user_assigned_identities=user_assigned_identity\n )\n mc.identity = identity\n return mc",
"def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:\n return pulumi.get(self, \"identity\")"
] |
[
"0.76740116",
"0.7659859",
"0.73053336",
"0.7012938",
"0.6426611",
"0.62798613",
"0.62467384",
"0.6161163",
"0.6086229",
"0.60307527",
"0.6023322",
"0.6023322",
"0.5911304",
"0.58574635",
"0.57308227",
"0.57053065",
"0.56760824",
"0.5666281",
"0.5490419",
"0.5432264",
"0.5421362",
"0.5421362",
"0.538145",
"0.52521265",
"0.52521265",
"0.5249806",
"0.5214702",
"0.5197268",
"0.51711243",
"0.5170351"
] |
0.8478219
|
0
|
Helper function to obtain the identity object via the MSI client.
|
def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:
return self.external_functions.get_user_assigned_identity_by_resource_id(self.cmd.cli_ctx, assigned_identity)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def client_identity(self) -> Identity:\n raise NotImplementedError",
"def _get_client_id(self, context):\n for key, value in context.invocation_metadata():\n if key == 'client_id':\n return value\n raise Exception('client id not found')",
"def identity(payload):\n user_id = payload['identity']\n return User.find_by_id(user_id)",
"def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def get_identity(self):\n return self.query_serial('*IDN?')",
"def identity(payload):\r\n user_id = payload['identity']\r\n return UserModel.find_by_id(user_id)",
"def get_identity(self):\n return GetIdentity(*self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_IDENTITY, (), '', 33, '8s 8s c 3B 3B H'))",
"def identity(payload):\n user_id = payload['identity']\n try:\n user = User.load(None, user_id)\n except:\n user = None\n return user",
"def identity(payload):\n user_id = payload['identity']\n return UserModel.find_by_id(user_id)",
"def identity(payload):\n user_id = payload['identity']\n return UserModel.find_by_id(user_id)",
"def identity(payload):\n\n user_id = payload['identity']\n return UserModel.find_by_id(user_id)",
"def getIdentity():\n return Sentience.__IDENTITY.lower()",
"def identity(self) -> Optional[pulumi.Input['IdentityInfoArgs']]:\n return pulumi.get(self, \"identity\")",
"def trigger_identity(self, attributionio_id, client_id='', user_agent=''):\n\n try:\n return self._make_public_api_request(\n url=PyttributionIo.PUBLIC_API_URL + 'identities',\n data=self._build_identity_request_data(\n attributionio_id=attributionio_id,\n client_id=client_id,\n user_agent=user_agent,\n )\n )\n except RequestException as e:\n logger.error(\n 'Pyttribution.io: Identity trigger for ID \"{attributionio_id}\" failed with HTTP status {exception}!'.format(\n attributionio_id=attributionio_id,\n exception=e,\n )\n )",
"def current_user_id(data_client):\n try:\n return data_client.current_user().id\n except tk.HTTPError as error:\n skip_or_fail(tk.HTTPError, \"ID of current user could not be retrieved!\", error)",
"def identity(self) -> pulumi.Output[Optional['outputs.ServiceIdentity']]:\n return pulumi.get(self, \"identity\")",
"def identity(self) -> Optional[pulumi.Input['ClusterIdentityArgs']]:\n return pulumi.get(self, \"identity\")",
"def client_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_id\")",
"def identifier(self):\n return self._client.identifier",
"def identity(self) -> pulumi.Output[Optional['outputs.IdentityInfoResponse']]:\n return pulumi.get(self, \"identity\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def identity(self) -> pulumi.Input['ClusterIdentityArgs']:\n return pulumi.get(self, \"identity\")",
"async def server_identity(self) -> Identity:\n raise NotImplementedError",
"def client_id(self) -> str:",
"def cluster_identity_get(self, desired_attributes=None):\n return self.request( \"cluster-identity-get\", {\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterIdentityInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterIdentityInfo, False ],\n } )",
"def identity(self) -> pulumi.Input['UserAssignedIdentityArgs']:\n return pulumi.get(self, \"identity\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")"
] |
[
"0.69976383",
"0.6399398",
"0.61145127",
"0.6106585",
"0.6106585",
"0.6098773",
"0.5980171",
"0.59704405",
"0.5964385",
"0.5925657",
"0.5925657",
"0.5887657",
"0.5863653",
"0.5841463",
"0.58319014",
"0.5789123",
"0.57326764",
"0.5716458",
"0.5710202",
"0.5706894",
"0.56918377",
"0.5690787",
"0.5690787",
"0.5690787",
"0.5689162",
"0.5684812",
"0.56760806",
"0.5664199",
"0.56631905",
"0.5659648"
] |
0.7378616
|
0
|
Helper function to obtain the client_id of the user-assigned identity.
|
def get_user_assigned_identity_client_id(self, user_assigned_identity=None) -> str:
assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()
if assigned_identity is None or assigned_identity == "":
raise RequiredArgumentMissingError("No assigned identity provided.")
return self.get_identity_by_msi_client(assigned_identity).client_id
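A minimal, runnable sketch of the fall-back-then-validate pattern used by this helper (the Context class, FakeIdentity stub, and the sample resource ID below are hypothetical stand-ins; the real helper resolves the identity through the MSI client):

class RequiredArgumentMissingError(Exception):
    pass

class FakeIdentity:
    # stand-in for the Identity object returned by the MSI client
    client_id = "00000000-0000-0000-0000-000000000000"

class Context:
    def __init__(self, assign_identity=None):
        self._assign_identity = assign_identity

    def get_assign_identity(self):
        return self._assign_identity

    def get_identity_by_msi_client(self, resource_id):
        # the real implementation calls the MSI client; stubbed here
        return FakeIdentity()

    def get_user_assigned_identity_client_id(self, user_assigned_identity=None):
        # prefer the explicit argument, otherwise fall back to --assign-identity
        assigned = user_assigned_identity or self.get_assign_identity()
        if not assigned:
            raise RequiredArgumentMissingError("No assigned identity provided.")
        return self.get_identity_by_msi_client(assigned).client_id

ctx = Context(assign_identity="/subscriptions/.../my-identity")
print(ctx.get_user_assigned_identity_client_id())  # prints the stub client_id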
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def current_user_id(data_client):\n try:\n return data_client.current_user().id\n except tk.HTTPError as error:\n skip_or_fail(tk.HTTPError, \"ID of current user could not be retrieved!\", error)",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self):\n return self.__client_id",
"def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:\n return self.external_functions.get_user_assigned_identity_by_resource_id(self.cmd.cli_ctx, assigned_identity)",
"def client_id(self):\n\n return self.__client_id",
"def client_id(self) -> str:"
] |
[
"0.75249153",
"0.75249153",
"0.75249153",
"0.75219494",
"0.7492419",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.741176",
"0.7387648",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.7386695",
"0.73813635",
"0.72487617",
"0.72478914",
"0.71805495"
] |
0.7760884
|
0
|
Obtain the value of attach_acr.
|
def get_attach_acr(self) -> Union[str, None]:
# read the original value passed by the command
attach_acr = self.raw_param.get("attach_acr")
# this parameter does not need dynamic completion
# validation
if self.decorator_mode == DecoratorMode.CREATE and attach_acr:
if self._get_enable_managed_identity(enable_validation=False):
# Attach acr operation will be handled after the cluster is created
if self.get_no_wait():
raise MutuallyExclusiveArgumentError(
"When --attach-acr and --enable-managed-identity are both specified, "
"--no-wait is not allowed, please wait until the whole operation succeeds."
)
else:
# newly added check, check whether client_id exists before creating role assignment
service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)
if not service_principal:
raise RequiredArgumentMissingError(
"No service principal provided to create the acrpull role assignment for acr."
)
return attach_acr
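The create-mode validation above can be isolated as a small, runnable check (MutuallyExclusiveArgumentError is redefined locally so the sketch has no Azure CLI dependency; the argument names mirror the CLI flags):

class MutuallyExclusiveArgumentError(Exception):
    pass

def validate_attach_acr(attach_acr, enable_managed_identity, no_wait):
    # with managed identity, the acrpull role assignment happens after the
    # cluster is created, so --no-wait cannot be honored alongside --attach-acr
    if attach_acr and enable_managed_identity and no_wait:
        raise MutuallyExclusiveArgumentError(
            "When --attach-acr and --enable-managed-identity are both specified, "
            "--no-wait is not allowed, please wait until the whole operation succeeds."
        )
    return attach_acr

print(validate_attach_acr("myregistry", enable_managed_identity=True, no_wait=False))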
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_detach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n detach_acr = self.raw_param.get(\"detach_acr\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return detach_acr",
"def acr(self) -> Optional[pulumi.Input['ACRArgs']]:\n return pulumi.get(self, \"acr\")",
"def acr(self) -> pulumi.Output[Optional['outputs.ACRResponse']]:\n return pulumi.get(self, \"acr\")",
"def ac(self):\n return self.acWF + self.acN",
"def process_attach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n attach_acr = self.context.get_attach_acr()\n if attach_acr:\n # If enable_managed_identity, attach acr operation will be handled after the cluster is created\n if not self.context.get_enable_managed_identity():\n service_principal_profile = mc.service_principal_profile\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=service_principal_profile.client_id,\n acr_name_or_id=attach_acr,\n # not actually used\n subscription_id=self.context.get_subscription_id(),\n )",
"def getRA(self):\n return self._ra",
"def read_acbr(self):\n return self.ACBR",
"def ac_dc(self):\n return self._ac_dc",
"def ac(self):\n if self.armor:\n return self.armor.ac\n return 10 + self.dexterity",
"def authority_information_access_value(self):\n\n if self._processed_extensions is False:\n self._set_extensions()\n return self._authority_information_access_value",
"def att_dc(self):\n return self._att_dc",
"def authority_key_identifier_value(self):\n\n if self._processed_extensions is False:\n self._set_extensions()\n return self._authority_key_identifier_value",
"def ad(self):\n # type: () -> int\n return self._ad",
"def ac(self):\n return np.array(self['ac'], dtype=np.float32) / 1000",
"def get_asic_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['asic_id']\n\t\texcept:\n\t\t\treturn None\n\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True",
"def ae(self):\n return self.server.ae",
"def get_a(self):\n return self._a",
"def ca(self):\n\n return self._basic_constraints['ca'].native",
"def get_AIA(self):\n\n return self.get_POW().getAIA()",
"def getA(self):\n\t\treturn self.a",
"def account_credential_details(self) -> Sequence['outputs.AccountCredentialDetailsResponse']:\n return pulumi.get(self, \"account_credential_details\")",
"def account_credential_details(self) -> Sequence['outputs.AccountCredentialDetailsResponse']:\n return pulumi.get(self, \"account_credential_details\")",
"def AdvertiseSRLB(self):\r\n\t\treturn self._get_attribute('advertiseSRLB')",
"def getAcNum(self):\n\n # stores the integer account number as a formatted 3-digit string (in which 0's occupy unused digits)\n strAcNum = str(\"{self.acNum:03d}\".format(self=self))\n return strAcNum",
"def process_attach_detach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n subscription_id = self.context.get_subscription_id()\n assignee, is_service_principal = self.context.get_assignee_from_identity_or_sp_profile()\n attach_acr = self.context.get_attach_acr()\n detach_acr = self.context.get_detach_acr()\n\n if attach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=attach_acr,\n subscription_id=subscription_id,\n is_service_principal=is_service_principal,\n )\n\n if detach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=detach_acr,\n subscription_id=subscription_id,\n detach=True,\n is_service_principal=is_service_principal,\n )",
"def erpac(self):\n return self._erpac",
"def getATR(self, cardService):\n ATR = toHexString(cardService.connection.getATR())\n return ATR",
"def get_asic_temp(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['asic_temp']\n\t\texcept:\n\t\t\treturn None",
"def getAttachment(self):\n log_func.warning(u'The method of obtaining the object attached to the anchor control is not defined')\n return None",
"def get_aaguid(self)->UUID:\n return DICEKey.DICEKEY_AUTHENTICATOR_AAGUID"
] |
[
"0.7080891",
"0.68829334",
"0.6811118",
"0.6062928",
"0.587817",
"0.584399",
"0.58427054",
"0.5744695",
"0.5720469",
"0.57051146",
"0.5686506",
"0.5593521",
"0.55094045",
"0.54986906",
"0.5485379",
"0.5434659",
"0.54029405",
"0.53506774",
"0.5339196",
"0.5336396",
"0.53019243",
"0.53019243",
"0.5286945",
"0.52824193",
"0.52692074",
"0.5249137",
"0.523971",
"0.5214689",
"0.52036864",
"0.5191507"
] |
0.7431823
|
0
|
Obtain the value of detach_acr.
|
def get_detach_acr(self) -> Union[str, None]:
# read the original value passed by the command
detach_acr = self.raw_param.get("detach_acr")
# this parameter does not need dynamic completion
# this parameter does not need validation
return detach_acr
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_attach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n attach_acr = self.raw_param.get(\"attach_acr\")\n\n # this parameter does not need dynamic completion\n # validation\n if self.decorator_mode == DecoratorMode.CREATE and attach_acr:\n if self._get_enable_managed_identity(enable_validation=False):\n # Attach acr operation will be handled after the cluster is created\n if self.get_no_wait():\n raise MutuallyExclusiveArgumentError(\n \"When --attach-acr and --enable-managed-identity are both specified, \"\n \"--no-wait is not allowed, please wait until the whole operation succeeds.\"\n )\n else:\n # newly added check, check whether client_id exists before creating role assignment\n service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)\n if not service_principal:\n raise RequiredArgumentMissingError(\n \"No service principal provided to create the acrpull role assignment for acr.\"\n )\n return attach_acr",
"def process_attach_detach_acr(self, mc: ManagedCluster) -> None:\n self._ensure_mc(mc)\n\n subscription_id = self.context.get_subscription_id()\n assignee, is_service_principal = self.context.get_assignee_from_identity_or_sp_profile()\n attach_acr = self.context.get_attach_acr()\n detach_acr = self.context.get_detach_acr()\n\n if attach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=attach_acr,\n subscription_id=subscription_id,\n is_service_principal=is_service_principal,\n )\n\n if detach_acr:\n self.context.external_functions.ensure_aks_acr(\n self.cmd,\n assignee=assignee,\n acr_name_or_id=detach_acr,\n subscription_id=subscription_id,\n detach=True,\n is_service_principal=is_service_principal,\n )",
"def att_dc(self):\n return self._att_dc",
"def ac_dc(self):\n return self._ac_dc",
"def erpac(self):\n return self._erpac",
"def delta_crl_indicator_value(self):\n\n if self._processed_extensions is False:\n self._set_extensions()\n return self._delta_crl_indicator_value",
"def read_acbr(self):\n return self.ACBR",
"def dac(self):\n return self._dac",
"def acr(self) -> Optional[pulumi.Input['ACRArgs']]:\n return pulumi.get(self, \"acr\")",
"def acr(self) -> pulumi.Output[Optional['outputs.ACRResponse']]:\n return pulumi.get(self, \"acr\")",
"def getEndCap( self, c, devChannel ):\n dev = 'endcap'\n self.validateDevChannel( dev, devChannel )\n value = self.dcDict[dev]['devChannels'][devChannel]['value']\n if value is not None: return value\n else: raise DCBoxError( 4 )",
"def getAttachment(self):\n log_func.warning(u'The method of obtaining the object attached to the anchor control is not defined')\n return None",
"def digest(self):\n return self.asset_ids[0]",
"def get_descriptor_val(self):\n return self.descriptor.to_numpy()",
"def credential(self):\n return self._tower.get_credential_by_id(self._data.get('credential'))",
"def credential_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"credential_id\")",
"def mrdc(self) -> str:\n return self._device_info[\"MRDC\"]",
"def credential(self):\n return self._credential",
"def get_value(self, device_name):\n return epics.caget(str(device_name))",
"def getAttachId(self, cmd):\n return self.__getIDFromCID(cmd)",
"def getRA(self):\n return self._ra",
"def ad(self):\n # type: () -> int\n return self._ad",
"def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down",
"def authority_key_identifier_value(self):\n\n if self._processed_extensions is False:\n self._set_extensions()\n return self._authority_key_identifier_value",
"def downlinker(self):\n return self.__downlinker",
"def GetDC(self):\r\n\r\n return self.dc",
"def rb_attached(self):\r\n return self._rb",
"def gbr_dl(self):\n return self._gbr_dl",
"def get_dcmpwr(self):\n return self.dcmpwr",
"def _get_rmac(self):\n return self.__rmac"
] |
[
"0.655605",
"0.5696708",
"0.56255716",
"0.56253946",
"0.53547883",
"0.52089995",
"0.519653",
"0.5169977",
"0.5113252",
"0.5102018",
"0.5069784",
"0.50382006",
"0.503496",
"0.50301373",
"0.5023109",
"0.5008546",
"0.49819642",
"0.49755028",
"0.4973189",
"0.4961231",
"0.49564445",
"0.49464023",
"0.4940165",
"0.4893561",
"0.4886817",
"0.48833063",
"0.48750407",
"0.47984233",
"0.47766706",
"0.4761165"
] |
0.85125786
|
0
|
Helper function to obtain the value of assignee from identity_profile or service_principal_profile.
|
def get_assignee_from_identity_or_sp_profile(self) -> Tuple[str, bool]:
assignee = None
is_service_principal = False
if check_is_msi_cluster(self.mc):
if self.mc.identity_profile is None or self.mc.identity_profile["kubeletidentity"] is None:
raise UnknownError(
"Unexpected error getting kubelet's identity for the cluster. "
"Please do not set --attach-acr or --detach-acr. "
"You can manually grant or revoke permission to the identity named "
"<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR."
)
assignee = self.mc.identity_profile["kubeletidentity"].object_id
is_service_principal = False
elif self.mc and self.mc.service_principal_profile is not None:
assignee = self.mc.service_principal_profile.client_id
is_service_principal = True
if not assignee:
raise UnknownError('Cannot get the AKS cluster\'s service principal.')
return assignee, is_service_principal
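The branch logic reduces to a small, runnable decision function (pick_assignee and its flat arguments are hypothetical simplifications of the method above, which reads these values from the managed-cluster object):

def pick_assignee(kubelet_object_id=None, sp_client_id=None):
    # MSI cluster: role assignments target the kubelet identity's object_id;
    # SP cluster: they target the service principal's client_id
    if kubelet_object_id:
        return kubelet_object_id, False   # (assignee, is_service_principal)
    if sp_client_id:
        return sp_client_id, True
    raise RuntimeError("Cannot get the AKS cluster's service principal.")

print(pick_assignee(kubelet_object_id="obj-123"))  # ('obj-123', False)
print(pick_assignee(sp_client_id="client-456"))    # ('client-456', True)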
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def assignee(self):\n membership = UnitMembershipFactory(unit=self.unit)\n return membership.user",
"def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]",
"def get_subscription_owner(request, profile_item):\n return profile_item.topic.profile.km_user.user",
"def get_subscription_owner(request, profile):\n return profile.km_user.user",
"def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")",
"def assigned_to(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assigned_to\")",
"def _getProfileForRole(entity, profile_model):\n\n if isinstance(entity, profile_model):\n return entity\n\n if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor):\n key_name = entity.program.key().name() + '/' + entity.user.key().name()\n else:\n key_name = entity.key().name()\n\n parent = entity.user\n return profile_model.get_by_key_name(key_name, parent=parent)",
"def get_user_profile(self):\n return self.user.profile",
"def get_user_assignd_identity_from_mc(self) -> Union[str, None]:\n user_assigned_identity = None\n if self.mc and self.mc.identity and self.mc.identity.user_assigned_identities:\n user_assigned_identity = safe_list_get(list(self.mc.identity.user_assigned_identities.keys()), 0, None)\n return user_assigned_identity",
"def assigned_user(self):\n return self._assigned_user",
"def render_assigned_user(self, value):\n return value.get_full_name() or value",
"def get_subscription_owner(request, list_entry):\n return list_entry.profile_item.topic.profile.km_user.user",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def primary_user_assigned_identity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')",
"def get_profile_id(self, profile):\n return profile['id']",
"def identity(self) -> pulumi.Input['UserAssignedIdentityArgs']:\n return pulumi.get(self, \"identity\")",
"def _assignment(info):\n\n return info.ui.context['object']",
"def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile",
"def determine_preferred_contact(user_data):\n try:\n user_data['personal']['email']\n except KeyError:\n preferred_contact = 'mail'\n else:\n preferred_contact = 'email'\n return preferred_contact",
"def get_primary_email(lookup_value, lookup_type=\"id\"):\n lookup_type = _validate_lookup_type(lookup_type, 'email')\n user_data = core.get_data('people', lookup_value, lookup_type, return_json=True)\n primary_email = user_data['emails'][0]['value']\n return primary_email",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def profile(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"profile\")",
"def profile_for(email):\n return OrderedDict([(b'email', email), (b'uid', b'10'), (b'role', b'user')])",
"def get_current_profile() -> Optional[Profile]:\n return _PROFILE[-1] if _PROFILE else None",
"def get_assign(self):\n return self.assign",
"def primary_user_assigned_identity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"primary_user_assigned_identity\")",
"def _evaluate_user_id(self, dispatcher, tracker):\n person = dispatcher.output_channel.get_person_by_id(dispatcher.sender_id)\n user = tracker.get_slot('user')\n if user is None:\n # Todo Replace self assignment\n user = person.aclattr\n\n return user",
"def get_assign_identity(self) -> Union[str, None]:\n\n return self._get_assign_identity(enable_validation=True)",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(\n km_user__profile__pk=self.kwargs.get(\"pk\")\n )"
] |
[
"0.64556867",
"0.6103636",
"0.6004778",
"0.5997133",
"0.59836423",
"0.59250575",
"0.58888984",
"0.5654481",
"0.56522924",
"0.5596333",
"0.55668586",
"0.5554263",
"0.55247456",
"0.55247456",
"0.5475379",
"0.5460796",
"0.54219383",
"0.5416374",
"0.5414677",
"0.54078645",
"0.5397156",
"0.5367131",
"0.5367131",
"0.5354289",
"0.5339485",
"0.5311255",
"0.52958435",
"0.5280547",
"0.5269825",
"0.5259709"
] |
0.757959
|
0
|
Internal function to obtain the value of load_balancer_sku; the default is CONST_LOAD_BALANCER_SKU_STANDARD.
|
def _get_load_balancer_sku(self, enable_validation: bool = False) -> Union[str, None]:
# read the original value passed by the command
load_balancer_sku = safe_lower(self.raw_param.get("load_balancer_sku", CONST_LOAD_BALANCER_SKU_STANDARD))
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_sku is not None
):
load_balancer_sku = safe_lower(
self.mc.network_profile.load_balancer_sku
)
# validation
if enable_validation:
if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:
if self._get_api_server_authorized_ip_ranges(enable_validation=False):
raise InvalidArgumentValueError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
if self._get_enable_private_cluster(enable_validation=False):
raise InvalidArgumentValueError(
"Please use standard load balancer for private cluster"
)
return load_balancer_sku
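The precedence rule (a value already set on the mc object wins over the raw command parameter, with a lower-cased default) can be shown in isolation; safe_lower below is a guessed re-creation of the helper, and resolve_load_balancer_sku is a hypothetical free-function version:

def safe_lower(value):
    # lower-case strings, pass None and other non-strings through unchanged
    return value.lower() if isinstance(value, str) else value

def resolve_load_balancer_sku(raw_param, mc_sku=None, default="standard"):
    sku = safe_lower(raw_param.get("load_balancer_sku", default))
    if mc_sku is not None:  # the property already set on the mc object wins
        sku = safe_lower(mc_sku)
    return sku

print(resolve_load_balancer_sku({}, mc_sku="Standard"))           # 'standard'
print(resolve_load_balancer_sku({"load_balancer_sku": "Basic"}))  # 'basic'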
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_load_balancer_sku(self) -> Union[str, None]:\n return safe_lower(self._get_load_balancer_sku(enable_validation=True))",
"def sku(self):\n return self._sku",
"def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input['StorageAccountSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def rack_sku_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rack_sku_id\")",
"def sku_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sku_name\")",
"def sku(self) -> Optional[pulumi.Input['EventhubNamespaceSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input['KeyVaultSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output['outputs.IotHubDpsSku']:\n return pulumi.get(self, \"sku\")",
"def vm_sku_name(self) -> str:\n return pulumi.get(self, \"vm_sku_name\")",
"def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")",
"def premium_sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"premium_sku\")",
"def sku(self) -> Optional[pulumi.Input['RedisCacheSpecPropertiesSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input['IotHubDpsSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.DiskSkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Input['LabVirtualMachineSkuArgs']:\n return pulumi.get(self, \"sku\")",
"def os_sku(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_sku\")",
"def os_sku(self) -> Optional[pulumi.Input[Union[str, 'OSSKU']]]:\n return pulumi.get(self, \"os_sku\")"
] |
[
"0.87513345",
"0.6634995",
"0.6579724",
"0.6579724",
"0.6484944",
"0.64583135",
"0.64583135",
"0.64583135",
"0.6454628",
"0.64175785",
"0.6401533",
"0.6374767",
"0.63584584",
"0.63446355",
"0.63356906",
"0.63356906",
"0.6311857",
"0.62470627",
"0.6216702",
"0.6216702",
"0.62132645",
"0.6186024",
"0.617051",
"0.617051",
"0.6160951",
"0.6160951",
"0.615425",
"0.61274576",
"0.611186",
"0.60999775"
] |
0.75678056
|
1
|
Obtain the value of load_balancer_sku; the default is CONST_LOAD_BALANCER_SKU_STANDARD.
|
def get_load_balancer_sku(self) -> Union[str, None]:
return safe_lower(self._get_load_balancer_sku(enable_validation=True))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_load_balancer_sku(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n load_balancer_sku = safe_lower(self.raw_param.get(\"load_balancer_sku\", CONST_LOAD_BALANCER_SKU_STANDARD))\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_sku is not None\n ):\n load_balancer_sku = safe_lower(\n self.mc.network_profile.load_balancer_sku\n )\n\n # validation\n if enable_validation:\n if load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:\n if self._get_api_server_authorized_ip_ranges(enable_validation=False):\n raise InvalidArgumentValueError(\n \"--api-server-authorized-ip-ranges can only be used with standard load balancer\"\n )\n if self._get_enable_private_cluster(enable_validation=False):\n raise InvalidArgumentValueError(\n \"Please use standard load balancer for private cluster\"\n )\n\n return load_balancer_sku",
"def sku(self):\n return self._sku",
"def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output['outputs.IotHubDpsSku']:\n return pulumi.get(self, \"sku\")",
"def rack_sku_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rack_sku_id\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")",
"def sku(self) -> Optional[pulumi.Input['StorageAccountSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sku_name\")",
"def premium_sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"premium_sku\")",
"def sku(self) -> Optional[pulumi.Input['KeyVaultSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def vm_sku_name(self) -> str:\n return pulumi.get(self, \"vm_sku_name\")",
"def sku(self) -> pulumi.Output['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")",
"def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")",
"def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")",
"def sku(self) -> Optional[pulumi.Input['EventhubNamespaceSpecSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input['RedisCacheSpecPropertiesSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional[pulumi.Input['IotHubDpsSkuArgs']]:\n return pulumi.get(self, \"sku\")",
"def sku(self) -> Optional['outputs.DiskSkuResponse']:\n return pulumi.get(self, \"sku\")",
"def os_sku(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_sku\")",
"def sku(self) -> pulumi.Input['LabVirtualMachineSkuArgs']:\n return pulumi.get(self, \"sku\")",
"def os_sku(self) -> Optional[pulumi.Input[Union[str, 'OSSKU']]]:\n return pulumi.get(self, \"os_sku\")"
] |
[
"0.7420787",
"0.6919118",
"0.6872399",
"0.6872399",
"0.6720261",
"0.6656864",
"0.66411835",
"0.66411835",
"0.66411835",
"0.6632395",
"0.6599251",
"0.658569",
"0.65780234",
"0.6554864",
"0.654954",
"0.65166485",
"0.65166485",
"0.64975655",
"0.64975655",
"0.64945287",
"0.6489658",
"0.6471814",
"0.6471814",
"0.6467048",
"0.6467048",
"0.6417507",
"0.6395121",
"0.63887006",
"0.6297467",
"0.62874734"
] |
0.877468
|
0
|
Obtain the value of load_balancer_managed_outbound_ip_count.
|
def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:
# read the original value passed by the command
load_balancer_managed_outbound_ip_count = self.raw_param.get(
"load_balancer_managed_outbound_ip_count"
)
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
):
load_balancer_managed_outbound_ip_count = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_managed_outbound_ip_count
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:\n count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')\n\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n not self.get_load_balancer_outbound_ips() and\n not self.get_load_balancer_outbound_ip_prefixes() and\n count_ipv6 is None\n ):\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n\n return count_ipv6",
"def get_nat_gateway_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n nat_gateway_managed_outbound_ip_count = self.raw_param.get(\"nat_gateway_managed_outbound_ip_count\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.nat_gateway_profile and\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile and\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count is not None\n ):\n nat_gateway_managed_outbound_ip_count = (\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return nat_gateway_managed_outbound_ip_count",
"def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:\n return pulumi.get(self, \"managed_outbound_ips\")",
"def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ips = self.raw_param.get(\n \"load_balancer_outbound_ips\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None\n ):\n load_balancer_outbound_ips = (\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ips",
"def get_load_balancer_outbound_ports(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_outbound_ports = self.raw_param.get(\n \"load_balancer_outbound_ports\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports is not None\n ):\n load_balancer_outbound_ports = (\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ports",
"def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:\n return pulumi.get(self, \"outbound_ips\")",
"def available_ip_address_count(self) -> int:\n return pulumi.get(self, \"available_ip_address_count\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def managed_outbound_ip_profile(self) -> Optional[pulumi.Input['ManagedClusterManagedOutboundIPProfileArgs']]:\n return pulumi.get(self, \"managed_outbound_ip_profile\")",
"def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocated_outbound_ports\")",
"def get_outbound_ip(self):\n try:\n response = self._make_internal(\"ip\")\n except errors.NoRemoteServicesConnection:\n logging.error(\"Unable to connect to Bad-Actor.Services\")\n return False\n\n self.outbound_ip = response.json()[\"ip\"]\n\n return self.outbound_ip",
"def LnsCount(self):\n if self.force_auto_sync:\n self.get('LnsCount')\n return self._LnsCount",
"def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_private_ip_address_count\")",
"def getblockcount(self):\n return self.proxy.getblockcount()",
"def get_network_allocations_number(self):\r\n LOG.debug(\"Get network allocations number.\")\r\n return constants.IP_ALLOCATIONS",
"def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetNumberOfLabels(self)",
"def count_update_pool_size(self) -> ConfigNodePropertyInteger:\n return self._count_update_pool_size",
"def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_GetNumberOfLabels(self)",
"def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUS2_GetNumberOfLabels(self)",
"def getInstCount(self):\n return self.instCount",
"def _get_adj_rib_out_count(self):\n return self.__adj_rib_out_count",
"def GetNumberOfBins(self):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUS2_GetNumberOfBins(self)",
"def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_GetNumberOfLabels(self)",
"def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ip_prefixes = self.raw_param.get(\n \"load_balancer_outbound_ip_prefixes\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes is not None\n ):\n load_balancer_outbound_ip_prefixes = (\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ip_prefixes",
"def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetNumberOfLabels(self)",
"def getconnectioncount(self):\n return self.proxy.getconnectioncount()",
"def get_net_adapters_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetNetAdaptersCount', self.handle)",
"def pool_size(self) -> ConfigNodePropertyInteger:\n return self._pool_size",
"def get_net_adapters_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetNetAdaptersCount', self.handle)"
] |
[
"0.7659348",
"0.7563879",
"0.7067809",
"0.69423544",
"0.64291984",
"0.6378259",
"0.62430793",
"0.6232198",
"0.6232198",
"0.62235296",
"0.6193783",
"0.61656964",
"0.61099416",
"0.6103169",
"0.5870814",
"0.57378906",
"0.5683588",
"0.56593734",
"0.56523633",
"0.56393576",
"0.5634664",
"0.56010705",
"0.5597311",
"0.55783707",
"0.55498844",
"0.5544131",
"0.55437696",
"0.5533316",
"0.5532725",
"0.54928774"
] |
0.8522993
|
0
|
Obtain the expected count of IPv6 managed outbound IPs.
|
def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:
count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
):
count_ipv6 = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
)
elif self.decorator_mode == DecoratorMode.UPDATE:
if (
not self.get_load_balancer_outbound_ips() and
not self.get_load_balancer_outbound_ip_prefixes() and
count_ipv6 is None
):
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
):
count_ipv6 = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
)
return count_ipv6
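The mode-dependent defaulting above boils down to two rules, sketched here as a runnable function (resolve_ipv6_count and its flat arguments are hypothetical; the real method walks the nested network_profile attributes instead):

CREATE, UPDATE = "create", "update"

def resolve_ipv6_count(mode, raw_value, mc_value, other_ip_args_set):
    # CREATE: a value already on the mc object overrides the raw parameter.
    # UPDATE: fall back to the existing mc value only when the caller supplied
    # neither an explicit count nor any other outbound-IP argument.
    if mode == CREATE and mc_value is not None:
        return mc_value
    if mode == UPDATE and raw_value is None and not other_ip_args_set:
        return mc_value
    return raw_value

print(resolve_ipv6_count(UPDATE, None, 2, other_ip_args_set=False))  # 2
print(resolve_ipv6_count(UPDATE, None, 2, other_ip_args_set=True))   # None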
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def NumberOfMappingIPV6Ranges(self):\r\n\t\treturn self._get_attribute('numberOfMappingIPV6Ranges')",
"def count_i_pv6(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count_i_pv6\")",
"def ipv6_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def available_ip_address_count(self) -> int:\n return pulumi.get(self, \"available_ip_address_count\")",
"def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons",
"def ipv6_bandwidth(self):\n return self._ipv6_bandwidth",
"def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} allowed IPs\")\n return allowed_count",
"def test_03_verify_upgraded_ipv6_network(self):\n\n self.createIpv4NetworkOffering(False)\n self.createIpv6NetworkOfferingForUpdate(False)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()",
"def Ipv6rate(self):\n\t\treturn self._get_attribute('ipv6rate')",
"def test_01_verify_ipv6_network(self):\n\n self.createIpv6NetworkOffering()\n self.createIpv6NetworkOfferingForUpdate()\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()",
"def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocated_outbound_ports\")",
"async def test_ipv6_configuration(\n ip6config_service: IP6ConfigService, dbus_session_bus: MessageBus\n):\n ip6 = IpConfiguration(\"/org/freedesktop/NetworkManager/IP6Config/1\", ip4=False)\n\n assert ip6.gateway is None\n assert ip6.nameservers is None\n\n await ip6.connect(dbus_session_bus)\n\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")\n assert ip6.nameservers == [\n IPv6Address(\"2001:1620:2777:1::10\"),\n IPv6Address(\"2001:1620:2777:2::20\"),\n ]\n\n ip6config_service.emit_properties_changed({\"Gateway\": \"2001:1620:2777:1::10\"})\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"2001:1620:2777:1::10\")\n\n ip6config_service.emit_properties_changed({}, [\"Gateway\"])\n await ip6config_service.ping()\n await ip6config_service.ping()\n assert ip6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")",
"def _FixIPv6Address(self, netblocks):\n new_list = []\n length = len(netblocks)\n if length > 0:\n number_ipv6 = 0\n for netblock in netblocks:\n if netblock.version == 4:\n new_list.append(netblock)\n elif netblock.version == 6:\n number_ipv6 += 1\n if number_ipv6 == length:\n return True, new_list\n return False, new_list",
"def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def test_add_autoassigned_pool_ipv6(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv6 addresses gives what we expect\n workloads = self._setup_env(host, count=2,\n ip=self.DEFAULT_IPV6_POOL)\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::1\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::\", retries=3)",
"def SupportsIPv6(self) -> bool:",
"def get_nat_gateway_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n nat_gateway_managed_outbound_ip_count = self.raw_param.get(\"nat_gateway_managed_outbound_ip_count\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.nat_gateway_profile and\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile and\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count is not None\n ):\n nat_gateway_managed_outbound_ip_count = (\n self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return nat_gateway_managed_outbound_ip_count",
"def ipv6_networks(view):\n return \"ipv6network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def _get_adj_rib_out_count(self):\n return self.__adj_rib_out_count",
"def Ipv6Srh(self):\r\n\t\treturn self._get_attribute('ipv6Srh')",
"def OSSupportsIPv6(self) -> bool:",
"def test_02_verify_ipv6_network_redundant(self):\n\n self.createIpv6NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.deployNetwork()\n self.deployNetworkVm()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.prepareRoutingTestResourcesInBackground()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()",
"def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)",
"def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n",
"def countEdges(self):\n return numpy.count_nonzero(self.supportArray) / 2",
"def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_private_ip_address_count\")",
"def Ipv6Flag(self):\r\n\t\treturn self._get_attribute('ipv6Flag')",
"def getPacketCount(self):\n return 1",
"def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()",
"def num_outputs(cls) -> list[int]:\n return [5] * 10"
] |
[
"0.68127364",
"0.5884466",
"0.5732813",
"0.571151",
"0.5619682",
"0.55843383",
"0.5579877",
"0.5567183",
"0.55664575",
"0.5534347",
"0.5514286",
"0.5499428",
"0.5474871",
"0.5454379",
"0.54475284",
"0.54290825",
"0.5425438",
"0.54230106",
"0.5418349",
"0.53125",
"0.53111243",
"0.5301711",
"0.53001046",
"0.5288773",
"0.528768",
"0.52862597",
"0.5277907",
"0.5267859",
"0.5267751",
"0.5221719"
] |
0.70227295
|
0
|
Obtain the value of load_balancer_outbound_ips.
|
def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:
# read the original value passed by the command
load_balancer_outbound_ips = self.raw_param.get(
"load_balancer_outbound_ips"
)
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.outbound_i_ps and
self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None
):
load_balancer_outbound_ips = (
self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ips
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:\n return pulumi.get(self, \"outbound_ips\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:\n return pulumi.get(self, \"managed_outbound_ips\")",
"def get_outbound_ip(self):\n try:\n response = self._make_internal(\"ip\")\n except errors.NoRemoteServicesConnection:\n logging.error(\"Unable to connect to Bad-Actor.Services\")\n return False\n\n self.outbound_ip = response.json()[\"ip\"]\n\n return self.outbound_ip",
"def get_load_balancer_outbound_ports(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_outbound_ports = self.raw_param.get(\n \"load_balancer_outbound_ports\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports is not None\n ):\n load_balancer_outbound_ports = (\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ports",
"def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ip_prefixes = self.raw_param.get(\n \"load_balancer_outbound_ip_prefixes\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes is not None\n ):\n load_balancer_outbound_ip_prefixes = (\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ip_prefixes",
"def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_managed_outbound_ip_count = self.raw_param.get(\n \"load_balancer_managed_outbound_ip_count\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None\n ):\n load_balancer_managed_outbound_ip_count = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_managed_outbound_ip_count",
"def slb_ip(self) -> str:\n return pulumi.get(self, \"slb_ip\")",
"def get_load_balancer_ip(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n\n lb_ip = cluster[\"load_balancers\"][0][\"ip\"]\n return lb_ip",
"def slb_ip(self) -> Optional[str]:\n return pulumi.get(self, \"slb_ip\")",
"def outbound_ip_prefixes(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']]:\n return pulumi.get(self, \"outbound_ip_prefixes\")",
"def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:\n count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')\n\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n not self.get_load_balancer_outbound_ips() and\n not self.get_load_balancer_outbound_ip_prefixes() and\n count_ipv6 is None\n ):\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n\n return count_ipv6",
"def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']",
"def private_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"private_ip_addresses\")",
"def bridgeIP(self):\r\n return self._bridgeIP",
"def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocated_outbound_ports\")",
"def get_local_lbs(self):\r\n mask = ('mask[loadBalancerHardware[datacenter],ipAddress]')\r\n return self.account.getAdcLoadBalancers(mask=mask)",
"def allowed_ips(self):\n\n return value_list_to_comma('AllowedIPs', self._peer.allowed_ips)",
"def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")",
"def managed_outbound_ip_profile(self) -> Optional[pulumi.Input['ManagedClusterManagedOutboundIPProfileArgs']]:\n return pulumi.get(self, \"managed_outbound_ip_profile\")",
"def getDestinationIp(self):\n return self.destinationIp",
"def load_balancer_id(self):\n return self._load_balancer_id",
"def internalIP(self):\r\n return self._internalIP",
"def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None",
"def get_ip_range(self):\n return self._ip_range",
"def endpoint_ip(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_ip\")",
"def private_ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_ip_address\")",
"def private_ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_ip_address\")",
"def dns_server_ips(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"dns_server_ips\")"
] |
[
"0.78112",
"0.7257709",
"0.7257709",
"0.72368276",
"0.70122385",
"0.67246056",
"0.65907145",
"0.6496916",
"0.62983847",
"0.6181122",
"0.6021635",
"0.59804964",
"0.58676976",
"0.58561325",
"0.58237296",
"0.5787278",
"0.57423633",
"0.57200485",
"0.5657679",
"0.5613782",
"0.5594104",
"0.55854344",
"0.5566745",
"0.55618787",
"0.55587023",
"0.55554354",
"0.55502844",
"0.5541544",
"0.5541544",
"0.5521863"
] |
0.8407174
|
0
|
Obtain the value of load_balancer_outbound_ip_prefixes.
|
def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:
# read the original value passed by the command
load_balancer_outbound_ip_prefixes = self.raw_param.get(
"load_balancer_outbound_ip_prefixes"
)
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes and
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes is not None
):
load_balancer_outbound_ip_prefixes = (
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ip_prefixes
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def outbound_ip_prefixes(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']]:\n return pulumi.get(self, \"outbound_ip_prefixes\")",
"def service_load_balancer_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"service_load_balancer_prefixes\")",
"def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ips = self.raw_param.get(\n \"load_balancer_outbound_ips\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None\n ):\n load_balancer_outbound_ips = (\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ips",
"def address_prefixes(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"address_prefixes\")",
"def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:\n return pulumi.get(self, \"outbound_ips\")",
"def NoOfAddressPrefix(self):\n return self._get_attribute('noOfAddressPrefix')",
"def address_space_prefixes(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"address_space_prefixes\")",
"def get_balancer_dns(self):\n return self.get_balancer_info()['DNSName']",
"def get_local_lbs(self):\r\n mask = ('mask[loadBalancerHardware[datacenter],ipAddress]')\r\n return self.account.getAdcLoadBalancers(mask=mask)",
"def remote_address_space_prefixes(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"remote_address_space_prefixes\")",
"def public_ip_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"public_ip_prefixes\")",
"def slb_ip(self) -> str:\n return pulumi.get(self, \"slb_ip\")",
"def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")",
"def address_space_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"address_space_prefixes\")",
"def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']",
"def load_balancer_id(self):\n return self._load_balancer_id",
"def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:\n return pulumi.get(self, \"managed_outbound_ips\")",
"def prefixlen(self):\n return self._ip_range.prefixlen",
"def slb_ip(self) -> Optional[str]:\n return pulumi.get(self, \"slb_ip\")",
"def remote_address_space_prefixes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"remote_address_space_prefixes\")",
"def get_load_balancer_ip(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n\n lb_ip = cluster[\"load_balancers\"][0][\"ip\"]\n return lb_ip",
"def get_load_balancer_outbound_ports(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_outbound_ports = self.raw_param.get(\n \"load_balancer_outbound_ports\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports is not None\n ):\n load_balancer_outbound_ports = (\n self.mc.network_profile.load_balancer_profile.allocated_outbound_ports\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ports",
"def remote_address_space_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"remote_address_space_prefixes\")",
"def nodebalancer_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"nodebalancer_id\")",
"def ip_address_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_prefix\")",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")",
"def subnet_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subnet_prefix\")",
"def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_managed_outbound_ip_count = self.raw_param.get(\n \"load_balancer_managed_outbound_ip_count\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None\n ):\n load_balancer_managed_outbound_ip_count = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_managed_outbound_ip_count"
] |
[
"0.780894",
"0.68973035",
"0.6727519",
"0.61115503",
"0.60654867",
"0.5975894",
"0.59265953",
"0.5886564",
"0.5806852",
"0.57933456",
"0.57694465",
"0.5723562",
"0.5718137",
"0.56745446",
"0.5617213",
"0.5616012",
"0.5605659",
"0.5599391",
"0.5581525",
"0.5579103",
"0.5526539",
"0.5497921",
"0.54964364",
"0.5410183",
"0.54083824",
"0.54079264",
"0.54079264",
"0.54079264",
"0.5338781",
"0.5274223"
] |
0.814535
|
0
|
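A note on the pattern repeated throughout these document fields: each getter reads the raw CLI parameter first, then, in CREATE mode, lets a value already present on the managed-cluster (`mc`) object take precedence. The sketch below reproduces that precedence rule in a self-contained form; `ParamContext`, `DecoratorMode`, and the `SimpleNamespace` stand-ins are illustrative, not the actual azure-cli types.

from enum import Enum
from types import SimpleNamespace
from typing import Optional

class DecoratorMode(Enum):
    CREATE = 1
    UPDATE = 2

class ParamContext:
    # Minimal stand-in for the decorator context these getters run in.
    def __init__(self, raw_param, mc, mode):
        self.raw_param = raw_param
        self.mc = mc
        self.decorator_mode = mode

    def get_outbound_ip_prefixes(self) -> Optional[list]:
        # start from the raw value passed on the command line
        value = self.raw_param.get("load_balancer_outbound_ip_prefixes")
        # in CREATE mode, a value already set on the mc object takes precedence
        if self.decorator_mode == DecoratorMode.CREATE and self.mc:
            profile = getattr(self.mc.network_profile, "load_balancer_profile", None)
            prefixes = getattr(profile, "outbound_ip_prefixes", None)
            if prefixes is not None and prefixes.public_ip_prefixes is not None:
                value = prefixes.public_ip_prefixes
        return value

mc = SimpleNamespace(network_profile=SimpleNamespace(
    load_balancer_profile=SimpleNamespace(
        outbound_ip_prefixes=SimpleNamespace(public_ip_prefixes=["prefix-id-1"]))))
ctx = ParamContext({"load_balancer_outbound_ip_prefixes": None}, mc, DecoratorMode.CREATE)
assert ctx.get_outbound_ip_prefixes() == ["prefix-id-1"]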
Obtain the value of load_balancer_outbound_ports.
|
def get_load_balancer_outbound_ports(self) -> Union[int, None]:
# read the original value passed by the command
load_balancer_outbound_ports = self.raw_param.get(
"load_balancer_outbound_ports"
)
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.allocated_outbound_ports is not None
):
load_balancer_outbound_ports = (
self.mc.network_profile.load_balancer_profile.allocated_outbound_ports
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ports
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocated_outbound_ports\")",
"def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ips = self.raw_param.get(\n \"load_balancer_outbound_ips\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None\n ):\n load_balancer_outbound_ips = (\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ips",
"def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_managed_outbound_ip_count = self.raw_param.get(\n \"load_balancer_managed_outbound_ip_count\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None\n ):\n load_balancer_managed_outbound_ip_count = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_managed_outbound_ip_count",
"def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:\n return pulumi.get(self, \"outbound_ips\")",
"def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ip_prefixes = self.raw_param.get(\n \"load_balancer_outbound_ip_prefixes\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes and\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes is not None\n ):\n load_balancer_outbound_ip_prefixes = (\n self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ip_prefixes",
"def trafficOutboundPorts(self):\n #\n # TODO: Reimplement this if possible\n #\n return client.trafficOutboundPorts(self)",
"def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:\n return pulumi.get(self, \"managed_outbound_ips\")",
"def get_outbound_ip(self):\n try:\n response = self._make_internal(\"ip\")\n except errors.NoRemoteServicesConnection:\n logging.error(\"Unable to connect to Bad-Actor.Services\")\n return False\n\n self.outbound_ip = response.json()[\"ip\"]\n\n return self.outbound_ip",
"def get_ports(self):\n return self._ports",
"def port_out(self) -> int:\n return self.proto.port_out",
"def port_ranges(self) -> pulumi.Output[Optional[Sequence['outputs.CustomRoutingEndpointTrafficPolicyPortRange']]]:\n return pulumi.get(self, \"port_ranges\")",
"def trafficInboundPorts(self):\n #\n # TODO: Reimplement this if possible\n #\n return client.trafficInboundPorts(self)",
"def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']",
"def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:\n count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')\n\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n not self.get_load_balancer_outbound_ips() and\n not self.get_load_balancer_outbound_ip_prefixes() and\n count_ipv6 is None\n ):\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n\n return count_ipv6",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})",
"def incoming_connections_ports(self) -> Sequence[str]:\n return pulumi.get(self, \"incoming_connections_ports\")",
"def PortStatistics(self):\n return self._get_attribute('portStatistics')",
"def slb_port(self) -> str:\n return pulumi.get(self, \"slb_port\")",
"def receiver_port(self):\n return self._receiver_port",
"def port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomRoutingEndpointTrafficPolicyPortRangeArgs']]]]:\n return pulumi.get(self, \"port_ranges\")",
"def port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomRoutingEndpointTrafficPolicyPortRangeArgs']]]]:\n return pulumi.get(self, \"port_ranges\")",
"def message_ports_out(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_message_ports_out(self)",
"def total_ports(self):\n return len(self.port_extension_map.keys())",
"def find_outport_by_subnet(self, subnet):\n for port_no, port in self.ports.items():\n if port.gateway and port.gateway.ipv4_subnet == subnet:\n return port_no\n return None",
"def destination_port_ranges(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"destination_port_ranges\")",
"def get_ports(self) -> tuple:\n raise NotImplementedError",
"def slb_port(self) -> Optional[str]:\n return pulumi.get(self, \"slb_port\")",
"def port_list(self):\n return self._port_list"
] |
[
"0.7598282",
"0.72015893",
"0.63595396",
"0.6313643",
"0.60974395",
"0.60765743",
"0.5943151",
"0.5860206",
"0.5785265",
"0.5699549",
"0.5690695",
"0.5621132",
"0.55925727",
"0.55866444",
"0.55693454",
"0.55693454",
"0.5550031",
"0.5505205",
"0.5474512",
"0.5430843",
"0.54095274",
"0.5396226",
"0.5396226",
"0.5353784",
"0.53335565",
"0.5304529",
"0.5294459",
"0.5278586",
"0.52719283",
"0.5267764"
] |
0.85207796
|
0
|
Obtain the value of load_balancer_idle_timeout.
|
def get_load_balancer_idle_timeout(self) -> Union[int, None]:
# read the original value passed by the command
load_balancer_idle_timeout = self.raw_param.get(
"load_balancer_idle_timeout"
)
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes is not None
):
load_balancer_idle_timeout = (
self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_idle_timeout
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_timeout_in_minutes\")",
"def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_timeout_in_minutes\")",
"def get_boot_timeout(self):\n return self._boot_timeout",
"def get_nat_gateway_idle_timeout(self) -> Union[int, None]:\n # read the original value passed by the command\n nat_gateway_idle_timeout = self.raw_param.get(\"nat_gateway_idle_timeout\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.nat_gateway_profile and\n self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes is not None\n ):\n nat_gateway_idle_timeout = (\n self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return nat_gateway_idle_timeout",
"def gettimeout(self):\r\n return self._timeout",
"def max_timeout(self):\n return self._max_timeout",
"def timeout(self):\n return self._data.get('timeout')",
"def idle_session_ttl_in_seconds(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def gettimeout(self):\r\n return self.sock.gettimeout()",
"def gettimeout(self):\r\n return self.sock.gettimeout()",
"def timeout_in_minutes(self) -> int:\n return pulumi.get(self, \"timeout_in_minutes\")",
"def get_timeout(self):\n return self.timeout",
"def idle_session_ttl_in_seconds(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def get_timeout(self) -> int:",
"def idle_delay(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idle_delay\")",
"def idle_delay(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idle_delay\")",
"def getdefaulttimeout():\r\n return default_timeout",
"def get_timeout(self):\r\n a = self.get_attributes('VisibilityTimeout')\r\n return int(a['VisibilityTimeout'])",
"def get_monitor_interval(self):\n return self.conf['icmp_check_interval']",
"def keep_alive_time(self) -> ConfigNodePropertyInteger:\n return self._keep_alive_time",
"def get_wait_timeout(self):\n if self.__wait_timeout is not None:\n return self.__wait_timeout\n return self.get_web_driver().get_wait_timeout()",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self) -> str:\n return pulumi.get(self, \"timeout\")",
"def getIdleTime(self):\n return self.__idleTime + time() - self.__fingerTime",
"def FlowStatResponseTimeOut(self):\n\t\treturn self._get_attribute('flowStatResponseTimeOut')",
"def ping_timeout(self) -> timedelta:\n return self._ping_timeout",
"def PortStatResponseTimeOut(self):\n\t\treturn self._get_attribute('portStatResponseTimeOut')"
] |
[
"0.7100315",
"0.7100315",
"0.67829424",
"0.6761424",
"0.6735782",
"0.6577744",
"0.6485874",
"0.6444881",
"0.6393913",
"0.6393913",
"0.6379619",
"0.63738286",
"0.636097",
"0.63357836",
"0.63131976",
"0.63131976",
"0.6284953",
"0.6280891",
"0.62770486",
"0.6243621",
"0.6233084",
"0.6216465",
"0.6216465",
"0.6216465",
"0.6216465",
"0.62157255",
"0.6188127",
"0.6185754",
"0.61838865",
"0.61809516"
] |
0.84147555
|
0
|
Obtain the value of nat_gateway_managed_outbound_ip_count.
|
def get_nat_gateway_managed_outbound_ip_count(self) -> Union[int, None]:
# read the original value passed by the command
nat_gateway_managed_outbound_ip_count = self.raw_param.get("nat_gateway_managed_outbound_ip_count")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.nat_gateway_profile and
self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile and
self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count is not None
):
nat_gateway_managed_outbound_ip_count = (
self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return nat_gateway_managed_outbound_ip_count
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_managed_outbound_ip_count = self.raw_param.get(\n \"load_balancer_managed_outbound_ip_count\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None\n ):\n load_balancer_managed_outbound_ip_count = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_managed_outbound_ip_count",
"def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:\n count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')\n\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n not self.get_load_balancer_outbound_ips() and\n not self.get_load_balancer_outbound_ip_prefixes() and\n count_ipv6 is None\n ):\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n\n return count_ipv6",
"def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:\n return pulumi.get(self, \"managed_outbound_ips\")",
"def managed_outbound_ip_profile(self) -> Optional[pulumi.Input['ManagedClusterManagedOutboundIPProfileArgs']]:\n return pulumi.get(self, \"managed_outbound_ip_profile\")",
"def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"allocated_outbound_ports\")",
"def get_outbound_ip(self):\n try:\n response = self._make_internal(\"ip\")\n except errors.NoRemoteServicesConnection:\n logging.error(\"Unable to connect to Bad-Actor.Services\")\n return False\n\n self.outbound_ip = response.json()[\"ip\"]\n\n return self.outbound_ip",
"def available_ip_address_count(self) -> int:\n return pulumi.get(self, \"available_ip_address_count\")",
"def get_net_adapters_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetNetAdaptersCount', self.handle)",
"def get_number_of_nat_gateway_pages(event, context):\n\n logger.info(f'DEBUG: {event}')\n\n dynamodb = boto3.resource('dynamodb')\n nat_gateways_table = dynamodb.Table(\n os.environ['DYNAMODB_TABLE_NAT_GATEWAYS'])\n\n try:\n response = []\n nat_gateways = nat_gateways_table.scan()\n for n in nat_gateways['Items']:\n response.append(n['PublicIp'] + '/32')\n\n results_per_page = _check_results_per_page(event)\n\n logger.info(f'response: {response}')\n pages = math.ceil(len(response) / results_per_page)\n logger.info(f'pages: {pages}')\n\n return _return_200(pages)\n\n except ValueError:\n _return_422('Invalid input')",
"def LnsCount(self):\n if self.force_auto_sync:\n self.get('LnsCount')\n return self._LnsCount",
"def get_network_allocations_number(self):\r\n LOG.debug(\"Get network allocations number.\")\r\n return constants.IP_ALLOCATIONS",
"def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()",
"def TunnelCount(self):\n if self.force_auto_sync:\n self.get('TunnelCount')\n return self._TunnelCount",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:\n return pulumi.get(self, \"effective_outbound_ips\")",
"def get_net_adapters_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetNetAdaptersCount', self.handle)",
"def getconnectioncount(self):\n return self.proxy.getconnectioncount()",
"def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:\n return pulumi.get(self, \"outbound_ips\")",
"def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_private_ip_address_count\")",
"def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:\n # read the original value passed by the command\n load_balancer_outbound_ips = self.raw_param.get(\n \"load_balancer_outbound_ips\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None\n ):\n load_balancer_outbound_ips = (\n self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_outbound_ips",
"def _get_adj_rib_out_count(self):\n return self.__adj_rib_out_count",
"def count_update_pool_size(self) -> ConfigNodePropertyInteger:\n return self._count_update_pool_size",
"def num_addresses(self):\n if hasattr(self, '_m_num_addresses'):\n return self._m_num_addresses if hasattr(self, '_m_num_addresses') else None\n\n self._m_num_addresses = self.num_addresses_raw.value\n return self._m_num_addresses if hasattr(self, '_m_num_addresses') else None",
"def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons",
"def getInstCount(self):\n return self.instCount",
"def getblockcount(self):\n return self.proxy.getblockcount()",
"def get_inbound_statements_grid_total_calls_internal(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_total_calls_internal_column_name)",
"def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit",
"def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")",
"def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")"
] |
[
"0.7827822",
"0.74722016",
"0.6817358",
"0.6476238",
"0.61126053",
"0.6101688",
"0.6061548",
"0.6059602",
"0.6006698",
"0.59497523",
"0.58619505",
"0.58606917",
"0.5841705",
"0.5818033",
"0.5818033",
"0.5814374",
"0.5783102",
"0.5766797",
"0.57649326",
"0.5712222",
"0.5636903",
"0.5599867",
"0.55610305",
"0.55555516",
"0.55537546",
"0.5546919",
"0.5475009",
"0.5469145",
"0.5466529",
"0.5466529"
] |
0.85568357
|
0
|
Obtain the value of nat_gateway_idle_timeout.
|
def get_nat_gateway_idle_timeout(self) -> Union[int, None]:
# read the original value passed by the command
nat_gateway_idle_timeout = self.raw_param.get("nat_gateway_idle_timeout")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.nat_gateway_profile and
self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes is not None
):
nat_gateway_idle_timeout = (
self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return nat_gateway_idle_timeout
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_timeout_in_minutes\")",
"def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"idle_timeout_in_minutes\")",
"def gettimeout(self):\r\n return self.sock.gettimeout()",
"def gettimeout(self):\r\n return self.sock.gettimeout()",
"def gettimeout(self):\r\n return self._timeout",
"def get_load_balancer_idle_timeout(self) -> Union[int, None]:\n # read the original value passed by the command\n load_balancer_idle_timeout = self.raw_param.get(\n \"load_balancer_idle_timeout\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes is not None\n ):\n load_balancer_idle_timeout = (\n self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes\n )\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return load_balancer_idle_timeout",
"def network_timeout(self):\n timeout = c_int()\n ckresult(_dll.FMOD_System_GetNetworkTimeout(self._ptr, byref(timeout)))\n return timeout.value",
"def timeout(self):\n return self._data.get('timeout')",
"def get_timeout_millis(self):\n return self.dp.get_timeout_millis()",
"def get_timeout(self):\n return self.timeout",
"def max_timeout(self):\n return self._max_timeout",
"def timeout_in_minutes(self) -> int:\n return pulumi.get(self, \"timeout_in_minutes\")",
"def get_timeout(self) -> int:",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def timeout(self):\n return self._timeout",
"def get_current_timeout(cls):\n return cls.current().get_timeout()",
"def getdefaulttimeout():\r\n return default_timeout",
"def idle_session_ttl_in_seconds(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def get_default_timeout(self):\n return self._timeout",
"def timeout(self) -> str:\n return pulumi.get(self, \"timeout\")",
"def idle_session_ttl_in_seconds(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"idle_session_ttl_in_seconds\")",
"def timeout(self):\n\n return self._timeout",
"def timeout(self):\n\n return self._timeout",
"def connect_timeout(self):\n return self.__connect_timeout",
"def timeout(self) -> Optional[float]:\n return self._timeout",
"def timeout(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"timeout\")",
"def timeout(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"timeout\")",
"def sessiontimeout(self) :\n\t\ttry :\n\t\t\treturn self._sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e"
] |
[
"0.7276977",
"0.7276977",
"0.71444297",
"0.71444297",
"0.7058299",
"0.6993919",
"0.69001245",
"0.6860101",
"0.6738092",
"0.6717437",
"0.6692491",
"0.66402537",
"0.6639264",
"0.6639073",
"0.6639073",
"0.6639073",
"0.6639073",
"0.6627185",
"0.6620188",
"0.661259",
"0.66012883",
"0.6579409",
"0.6571002",
"0.6556535",
"0.6556535",
"0.6455209",
"0.6446012",
"0.6434365",
"0.6434365",
"0.6425946"
] |
0.8476028
|
0
|
Obtain the value of ip_families.
|
def get_ip_families(self) -> Union[List[str], None]:
# read the original value passed by the command
ip_families = self.raw_param.get("ip_families")
# normalize
ip_families = extract_comma_separated_string(ip_families, keep_none=True, default_value=[])
# try to read the property value corresponding to the parameter from the `mc` object
if self.mc and self.mc.network_profile and self.mc.network_profile.ip_families is not None:
ip_families = self.mc.network_profile.ip_families
# this parameter does not need dynamic completion
# this parameter does not need validation
return ip_families
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ip_families(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'IpFamily']]]]]:\n return pulumi.get(self, \"ip_families\")",
"def get_families(self):\n return self.__make_api_call('get/families')",
"def list_families(self):\n return self.__make_api_call('list/families')",
"def get_families(instance):\n families = instance.data.get(\"families\", [])\n family = instance.data.get(\"family\")\n if family:\n families.append(family)\n return set(families)",
"def family_ids(self):\n\n return self._family_ids",
"def address_family(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address_family\")",
"def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']",
"def families(self):\n\n return [get_target_family_by_id(i) for i in self._family_ids]",
"def address_family(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"address_family\")",
"def address_family(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address_family\")",
"def __repr__(self):\n return str(self.families)",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def get_nh_family(self):\n return int(self.get('nhr_family'))",
"def bgp_peers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadBalancerBgpPeerArgs']]]]:\n return pulumi.get(self, \"bgp_peers\")",
"def get_addrs(self) -> List[Multiaddr]:",
"def get_families() -> list:\n if not mainloop._initialized:\n raise RuntimeError(\"the mainloop needs to be initialized\")\n if not _family_cache:\n # The wrapper function can return anything iterable.\n _family_cache.add('Monospace')\n _family_cache.update(_get_wrapper('font:get_families')())\n # It's important to return a copy here because someone might\n # mutate the returned list.\n return sorted(_family_cache, key=str.casefold)",
"def AddressFamily(self) -> AddressFamily:",
"def matching_address_families(\n self, address_families_dict: Mapping[str, \"AddressFamily\"]\n ) -> Tuple[\"AddressFamily\", ...]:",
"def excluded_instance_families(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"excluded_instance_families\")",
"def family_keys(self, fam=None):\n if fam is not None:\n return [x for x in self._family_arrays if fam in self._family_arrays[x]]\n else:\n return list(self._family_arrays.keys())",
"def matching_addresses(\n self, address_families: Sequence[\"AddressFamily\"]\n ) -> Sequence[Tuple[Address, TargetAdaptor]]:\n return tuple(\n itertools.chain.from_iterable(\n af.addresses_to_target_adaptors.items() for af in address_families\n )\n )",
"def ipv6_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def get_families(self):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_template\n # NOTE: This implementation currently ignores plenary view\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs()\n collection = JSONClientValidated('relationship',\n collection='Family',\n runtime=self._runtime)\n result = collection.find().sort('_id', DESCENDING)\n\n return objects.FamilyList(result, runtime=self._runtime, proxy=self._proxy)",
"def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def get_addrs(self):\n # TODO check if server is listening\n return self.multiaddrs",
"def families(self):\n out = []\n start = {}\n for fam in family._registry:\n sl = self._get_family_slice(fam)\n if sl.start != sl.stop:\n out.append(fam)\n start[fam] = (sl.start)\n out.sort(key=start.__getitem__)\n return out",
"def bgp_peers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BgpPeerArgs']]]]:\n return pulumi.get(self, \"bgp_peers\")",
"def ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]]:\n return pulumi.get(self, \"ip_addresses\")",
"def get_supervisor_addresses(self) -> List[str]:",
"def _get_ipv4_addresses(self, host: str) -> Dict[str, List[IPv4Address]]:\n if host == \"self\":\n command = \"show ip address\"\n elif host == \"peer\":\n command = \"failover exec mate show ip address\"\n\n show_ip_address = self.show(command)\n re_ip_addresses = RE_SHOW_IP_ADDRESS.findall(show_ip_address)\n\n results = {\n interface: [IPv4Interface(f\"{address}/{netmask}\")] for interface, address, netmask in re_ip_addresses\n }\n log.debug(\"Host %s: ip interfaces %s\", self.host)\n return results"
] |
[
"0.85059047",
"0.68266314",
"0.6718511",
"0.6189531",
"0.6115941",
"0.6090748",
"0.6053435",
"0.58633566",
"0.5686859",
"0.5685495",
"0.5653017",
"0.5608316",
"0.5577426",
"0.55728847",
"0.5570815",
"0.55400455",
"0.5529794",
"0.55246115",
"0.5430281",
"0.54093844",
"0.5331535",
"0.5311604",
"0.5292552",
"0.5249397",
"0.5244732",
"0.52380466",
"0.52278894",
"0.5223134",
"0.52091265",
"0.52022874"
] |
0.81734425
|
1
|
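The getter above normalises its raw value through `extract_comma_separated_string(...)`. The real helper lives in azure-cli and is not shown in this dump; the sketch below is only a guess at its contract inferred from the call site (None survives when `keep_none=True`, an empty string collapses to `default_value`), and the actual implementation may differ.

from typing import List, Optional

def extract_comma_separated_string(raw: Optional[str], keep_none: bool = False,
                                   default_value: Optional[List[str]] = None) -> Optional[List[str]]:
    # None propagates when keep_none is set; otherwise fall back to the default
    if raw is None:
        return None if keep_none else default_value
    # an empty string normalises to the default (an empty list at the call site)
    if not raw.strip():
        return default_value
    return [part.strip() for part in raw.split(",") if part.strip()]

assert extract_comma_separated_string(None, keep_none=True, default_value=[]) is None
assert extract_comma_separated_string("IPv4, IPv6", keep_none=True, default_value=[]) == ["IPv4", "IPv6"]
assert extract_comma_separated_string("", keep_none=True, default_value=[]) == []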
Obtain the value of network_plugin_mode.
|
def get_network_plugin_mode(self) -> Union[str, None]:
return self._get_network_plugin_mode(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def network_plugin_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkPluginMode']]]:\n return pulumi.get(self, \"network_plugin_mode\")",
"def network_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkMode']]]:\n return pulumi.get(self, \"network_mode\")",
"def network_plugin(self) -> Optional[pulumi.Input[Union[str, 'NetworkPlugin']]]:\n return pulumi.get(self, \"network_plugin\")",
"def networkMode(self):\n\n response = self.at.sendCommand(\"AT+CEREG?\")\n\n # If we failed to query the network mode, that's a paddlin'\n if not response:\n raise modem.AtError(response, \"Failed to query network mode\")\n\n lines = response.lines\n\n if len(lines) < 1:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n fields = lines[0].split(\",\")\n\n # If there isn't at least the prefix and the current mode, that's a\n # paddlin'\n if len(fields) < 2:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n try:\n return int(fields[1])\n\n except ValueError:\n raise modem.AtError(response, \"Invalid network mode\")",
"def getPanelMode(self) -> str:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelMode()\r\n return \"Not Connected\"",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def get_network_plugin(self) -> Union[str, None]:\n\n return self._get_network_plugin(enable_validation=True)",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_mode(self):\r\n return self.mode",
"def getmode(self):\n return self.mode",
"def game_mode(self):\n return self._get(\"game_mode\")",
"def mode(self):\n return self._data.get('mode', None)",
"def getMode(self):\n return self._mode",
"def get_window_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetWindowMode', self.handle)",
"def get_vncmode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetVNCMode', self.handle)",
"def read_configuration_mode(self):\n configuration_mode = self.scpi_comm('CONFIG?').strip()\n mode = 'Unknown'\n if configuration_mode == '0':\n mode = 'Voltage tracking'\n if configuration_mode == '2':\n mode = 'Dual output'\n if configuration_mode in ('3', '4'):\n mode = 'Track Voltage and Current'\n return mode",
"def getMode(self):\n with self.lock:\n mode = self.mode\n return mode",
"def mode(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.MODE, self._SW_VER)]",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> str:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def _get_mode():\n return context.get_context('mode')",
"def get_mode_parameter(mode):\n if mode == 'job':\n return 'cli'\n elif mode == 'serve':\n return 'serving'\n else:\n return mode",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def mode(self):\n return self._lift(\"mode\")",
"def get_default_MXNet_mode():\n return MXNET_DEFAULT_MODE",
"def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]",
"def mode(self):\r\n return self._mode"
] |
[
"0.86007",
"0.72097677",
"0.7105048",
"0.6824493",
"0.6790282",
"0.6675465",
"0.6665327",
"0.6518027",
"0.65160686",
"0.6483247",
"0.6473169",
"0.6433205",
"0.63309723",
"0.631305",
"0.63063604",
"0.627179",
"0.6144949",
"0.60640067",
"0.6043401",
"0.60211957",
"0.60211957",
"0.6014347",
"0.6014347",
"0.60142255",
"0.601232",
"0.59864587",
"0.59725887",
"0.5951958",
"0.59421986",
"0.59334534"
] |
0.8416459
|
1
|
Obtain the value of network_plugin.
|
def get_network_plugin(self) -> Union[str, None]:
return self._get_network_plugin(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def network_plugin(self) -> Optional[pulumi.Input[Union[str, 'NetworkPlugin']]]:\n return pulumi.get(self, \"network_plugin\")",
"def network_plugin_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkPluginMode']]]:\n return pulumi.get(self, \"network_plugin_mode\")",
"def get_network_plugin_mode(self) -> Union[str, None]:\n return self._get_network_plugin_mode(enable_validation=True)",
"def _get_network_plugin(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n network_plugin = self.raw_param.get(\"network_plugin\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.network_plugin is not None\n ):\n network_plugin = self.mc.network_profile.network_plugin\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n (\n pod_cidr,\n service_cidr,\n dns_service_ip,\n docker_bridge_address,\n network_policy,\n ) = self._get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(\n enable_validation=False\n )\n network_plugin_mode = self._get_network_plugin_mode(enable_validation=False)\n if network_plugin:\n if network_plugin == \"azure\" and pod_cidr and network_plugin_mode != \"overlay\":\n raise InvalidArgumentValueError(\n \"Please specify network plugin mode `overlay` when using --pod-cidr or \"\n \"use network plugin `kubenet`. For more information about Azure CNI \"\n \"Overlay please see https://aka.ms/aks/azure-cni-overlay\"\n )\n else:\n if (\n pod_cidr or\n service_cidr or\n dns_service_ip or\n docker_bridge_address or\n network_policy\n ):\n raise RequiredArgumentMissingError(\n \"Please explicitly specify the network plugin type\"\n )\n return network_plugin",
"def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)",
"def get_network(self):\n return self._network",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def network(self):\n return self.__network",
"def network_interface(self): \n return self._network_interface",
"def network(self):\n return self._network",
"def network(self):\n return self._network",
"def network(self):\n return self._network",
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")",
"def _get_tunnel_metric(self):\n return self.__tunnel_metric",
"def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")",
"def getNet(self):\n\t\treturn self.loader",
"def get_plugin_setting(self, plugin, parameter):\n asserts.assert_true(\n self.fuel_web.check_plugin_exists(self.cluster_id, plugin),\n \"Plugin {0} isn't found.\".format(plugin))\n\n attributes = self.nailgun_client.get_cluster_attributes(\n self.cluster_id)\n attributes = attributes['editable'][plugin]\n\n value = None\n for item in attributes['metadata']['versions']:\n if (parameter in item and\n item['metadata']['plugin_id'] ==\n attributes['metadata']['chosen_id']):\n value = item[parameter]['value']\n break\n asserts.assert_is_not_none(\n value, \"Could not find parameter {0} for plugin {1}\".format(\n parameter, plugin))\n return value",
"def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def get(self,num):\n\t\t_result = None\n\t\tif num in self._protocols:\n\t\t\t_result = self._protocols[num]\n\n\t\treturn _result",
"def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")",
"def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")",
"def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']",
"def get_plugin_value(self, plugin, key, default=None):\n plugin = plugin.lower()\n session = self.ssession()\n try:\n result = session.query(PluginValues) \\\n .filter(PluginValues.plugin == plugin)\\\n .filter(PluginValues.key == key) \\\n .one_or_none()\n if result is not None:\n result = result.value\n elif default is not None:\n result = default\n return _deserialize(result)\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n self.ssession.remove()",
"def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")",
"def network_profile(self) -> Optional['outputs.ClusterPoolResourcePropertiesResponseNetworkProfile']:\n return pulumi.get(self, \"network_profile\")",
"def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])",
"def get_nh_tun_sip(self):\n return int(self.get('nhr_tun_sip'))"
] |
[
"0.77019167",
"0.67851454",
"0.66011715",
"0.651455",
"0.65120023",
"0.63674635",
"0.62610763",
"0.6181945",
"0.6164135",
"0.61154985",
"0.61154985",
"0.61154985",
"0.60768074",
"0.6068218",
"0.59530205",
"0.5934324",
"0.59288955",
"0.5928412",
"0.58149225",
"0.58015245",
"0.58015245",
"0.5789082",
"0.5770301",
"0.5770301",
"0.5690522",
"0.56715786",
"0.56646293",
"0.56604093",
"0.5644825",
"0.56417906"
] |
0.7122065
|
1
|
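The two documents above delegate to internal `_get_network_plugin*` variants with `enable_validation=True`, while the internal versions (visible among the negatives) call each other with `enable_validation=False`. A minimal sketch of why that split exists, with `ValueError` standing in for azure-cli's `RequiredArgumentMissingError` and `NetworkContext` as an illustrative stand-in:

from typing import Optional

class NetworkContext:
    # Sketch of the public/internal getter split: the public getter always
    # validates, while internal cross-parameter reads skip validation so that
    # interdependent getters do not validate each other recursively.
    def __init__(self, raw_param):
        self.raw_param = raw_param

    def get_network_plugin(self) -> Optional[str]:
        return self._get_network_plugin(enable_validation=True)

    def _get_network_plugin(self, enable_validation: bool = False) -> Optional[str]:
        plugin = self.raw_param.get("network_plugin")
        if enable_validation:
            # read the interdependent parameter *without* validation
            pod_cidr = self.raw_param.get("pod_cidr")
            if pod_cidr and not plugin:
                # stand-in for azure-cli's RequiredArgumentMissingError
                raise ValueError("Please explicitly specify the network plugin type")
        return plugin

ctx = NetworkContext({"network_plugin": "kubenet", "pod_cidr": "10.244.0.0/16"})
assert ctx.get_network_plugin() == "kubenet"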
Get the value of network_dataplane.
|
def get_network_dataplane(self) -> Union[str, None]:
return self.raw_param.get("network_dataplane")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def network_dataplane(self) -> Optional[pulumi.Input[Union[str, 'NetworkDataplane']]]:\n return pulumi.get(self, \"network_dataplane\")",
"def getData(self):\n return struct.unpack(\"!d\",self.data)[0]",
"def getData(self):\n return struct.unpack(\"!f\",self.data)[0]",
"def get_dataplane_state(self, path, params):\n reply = self._faucet_collector.get_dataplane_state()\n self._augment_state_reply(reply, path)\n return reply",
"def value(self):\n return self.data",
"def get_input_data_value(node: Node, port: int):\n return node.in_port(port).data.get_value()",
"def getData(self):\n return struct.unpack(\"!I\",self.data)[0]",
"def getData(self):\n return struct.unpack(\"!I\",self.data)[0]",
"def getd(self, node):\n\n return self.daq.getDouble(f'/{self.device_id}/{node}')",
"def get_value(self, data):\n value = data['value']\n return value",
"def _value(self):\n return self.device.value(*self._id[1:])",
"def get_lane(self):\n return self.lane",
"def getData(self):\n return struct.unpack(\"!Q\",self.data)[0]",
"def getData(self):\n return struct.unpack(\"!Q\",self.data)[0]",
"def value(self):\n return self._data",
"def get_value(self):\n return self.__edge_value",
"def value(self) -> int:\n return self._data",
"def _get_tunnel_metric(self):\n return self.__tunnel_metric",
"def get_value(self):\n return self.sensor.get_value()",
"def get_data(self, variable):\n return self.data.get(variable)",
"def get_value(self):\n coord = np.round(self.coordinates).astype(int)\n if self.multichannel:\n shape = self._data_view.shape[:-1]\n else:\n shape = self._data_view.shape\n\n if all(0 <= c < s for c, s in zip(coord[self.dims.displayed], shape)):\n value = (\n self.data_level,\n self._data_view[tuple(coord[self.dims.displayed])],\n )\n else:\n value = None\n\n return value",
"def value(self):\r\n return self._data['value']",
"def value(self):\n return self.get_data(\"value\")",
"def getOutputValue(self):\n return DPxGetDinDataOut()",
"def data(value):\n return value.data",
"def data(self):\n return self.d",
"def data(value):\n return value",
"def value(self):\n return self.piece_behavior.value",
"def value(self):\n return self._read()",
"def getvalue(self):\n return str(self.data)"
] |
[
"0.78302395",
"0.633378",
"0.61172324",
"0.60053504",
"0.59994256",
"0.59934694",
"0.5991421",
"0.5991421",
"0.59132063",
"0.59038144",
"0.5899162",
"0.5887311",
"0.588673",
"0.588673",
"0.5824124",
"0.58012956",
"0.57815427",
"0.57774425",
"0.57729685",
"0.57243156",
"0.57060456",
"0.5683031",
"0.5646895",
"0.56243455",
"0.5612927",
"0.5529234",
"0.55273503",
"0.55173254",
"0.5507244",
"0.5503227"
] |
0.7886303
|
0
|
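By contrast, `get_network_dataplane` above has no `mc` fallback at all: it surfaces only the raw CLI value. A tiny sketch of that degenerate case; the class name is illustrative and "cilium" is one plausible value for this parameter.

from typing import Optional

class DataplaneContext:
    # Some parameters in this family have no mc fallback:
    # the getter simply returns the raw CLI value.
    def __init__(self, raw_param):
        self.raw_param = raw_param

    def get_network_dataplane(self) -> Optional[str]:
        return self.raw_param.get("network_dataplane")

assert DataplaneContext({"network_dataplane": "cilium"}).get_network_dataplane() == "cilium"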
Internal function to obtain the value of pod_cidr, service_cidr, dns_service_ip, docker_bridge_address and network_policy.
|
def _get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
    self, enable_validation: bool = False
) -> Tuple[
    Union[str, None],
    Union[str, None],
    Union[str, None],
    Union[str, None],
    Union[str, None],
]:
    # get network profile from `mc`
    network_profile = None
    if self.mc:
        network_profile = self.mc.network_profile

    # pod_cidr
    # read the original value passed by the command
    pod_cidr = self.raw_param.get("pod_cidr")
    # try to read the property value corresponding to the parameter from the `mc` object
    # pod_cidr is allowed to be updated so only read from mc object during creates
    if self.decorator_mode == DecoratorMode.CREATE:
        if network_profile and network_profile.pod_cidr is not None:
            pod_cidr = network_profile.pod_cidr

    # service_cidr
    # read the original value passed by the command
    service_cidr = self.raw_param.get("service_cidr")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.service_cidr is not None:
        service_cidr = network_profile.service_cidr

    # dns_service_ip
    # read the original value passed by the command
    dns_service_ip = self.raw_param.get("dns_service_ip")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.dns_service_ip is not None:
        dns_service_ip = network_profile.dns_service_ip

    # network_policy
    # read the original value passed by the command
    network_policy = self.raw_param.get("network_policy")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.network_policy is not None:
        network_policy = network_profile.network_policy

    # these parameters do not need dynamic completion

    # validation
    if enable_validation:
        network_plugin = self._get_network_plugin(enable_validation=False)
        if not network_plugin:
            if (
                pod_cidr or
                service_cidr or
                dns_service_ip or
                network_policy
            ):
                raise RequiredArgumentMissingError(
                    "Please explicitly specify the network plugin type"
                )
    return pod_cidr, service_cidr, dns_service_ip, None, network_policy
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(\n self,\n ) -> Tuple[\n Union[str, None],\n Union[str, None],\n Union[str, None],\n Union[str, None],\n Union[str, None],\n ]:\n return self._get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(\n enable_validation=True\n )",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def _GetPod(self) -> Dict[str, Any]:\n stdout, _, _ = RunKubectlCommand(['get', 'pod', self.name, '-o', 'yaml'])\n pod = yaml.safe_load(stdout)\n self.ip_address = pod.get('status', {}).get('podIP')\n return pod",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def _GetIpAddress(self):\n ingress_name = '%s-ingress' % self.name\n get_cmd = [\n 'get', 'ing', ingress_name, '-o',\n 'jsonpath={.status.loadBalancer.ingress[*].ip}'\n ]\n stdout, _, _ = RunKubectlCommand(get_cmd)\n ip_address = stdout\n if ip_address:\n self.ip_address = ip_address",
"def get_pod_cidrs(self) -> Union[List[str], None]:\n # read the original value passed by the command\n pod_cidrs = self.raw_param.get(\"pod_cidrs\")\n # normalize\n pod_cidrs = extract_comma_separated_string(pod_cidrs, keep_none=True, default_value=[])\n # try to read the property value corresponding to the parameter from the `mc` object\n if self.mc and self.mc.network_profile and self.mc.network_profile.pod_cidrs is not None:\n pod_cidrs = self.mc.network_profile.pod_cidrs\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return pod_cidrs",
"def get_appgw_subnet_cidr(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_CIDR\")\n\n # read the original value passed by the command\n appgw_subnet_cidr = self.raw_param.get(\"appgw_subnet_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None\n ):\n appgw_subnet_cidr = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_cidr",
"def build_dp_variables(dp):\n v = {}\n for dps in dp.deployedpackageservice_set.all():\n # 1st pass: build network variables\n if dps.address:\n for net in json.loads(dps.address):\n for nn in net: # net name\n for a in net[nn]: # address\n if \":\" in a: # XXX: do better\n a_type = \"ipv6\"\n else:\n a_type = \"ipv4\"\n v[\"%s.%s_%s\" % (dps.service.ident, nn, a_type)] = a\n # 2nd pass: pull in all Chef's attributes from the \"default\"\n # and the \"normal\" keys of \"knife node show -l\"\n try:\n attrs = json.loads(exec_knife(\"node show -l -F json %s\" % dps.hostname))\n flatten_dict(dps.service.ident, v, attrs[u\"default\"])\n flatten_dict(dps.service.ident, v, attrs[u\"normal\"])\n except:\n pass\n return v",
"def get_cidr_param ( cidr_param ) :\n if cidr_param == 'HBO' :\n cidr_param = hbo_cidr_list\n elif cidr_param == 'HBO-UK' :\n cidr_param = hbo_uk_cidr_list\n elif cidr_param == 'ALL' :\n cidr_param = [ all_ip_cidr ]\n elif cidr_param == 'ESPv4-UAT' :\n cidr_param = [ esp_nonprod[ 'espv4_cidr' ] ]\n elif cidr_param == 'ESPv4-PROD' :\n cidr_param = [ esp_prod[ 'espv4_cidr' ] ]\n else :\n cidr_param = [ cidr_param ]\n\n return cidr_param",
"def get_container_ip(self, container):\n info = self.inspect_container(container)\n if not info:\n return None\n\n netInfo = info['NetworkSettings']\n if not netInfo:\n return None\n\n ip = netInfo['IPAddress']\n if not ip:\n return None\n\n return ip",
"def getProperties(self, key):\n try:\n key = unpack('>L', inet_aton(key))[0]\n except(error):\n return None\n\n bit = self._MAX_IPV4_BIT\n value = None\n node = self._ROOT_PTR\n\n while(node != self._NULL_PTR):\n if self._treeProperties[node] != None:\n value = self._treeProperties[node]\n\n if (key & bit) != 0:\n node = self._treeRights[node]\n else:\n node = self._treeLefts[node]\n bit >>= 1\n\n return value",
"def __init__(__self__, *,\n dns_service_ip: Optional[pulumi.Input[str]] = None,\n ip_families: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'IpFamily']]]]] = None,\n kube_proxy_config: Optional[pulumi.Input['ContainerServiceNetworkProfileKubeProxyConfigArgs']] = None,\n load_balancer_profile: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']] = None,\n load_balancer_sku: Optional[pulumi.Input[Union[str, 'LoadBalancerSku']]] = None,\n monitoring: Optional[pulumi.Input['NetworkMonitoringArgs']] = None,\n nat_gateway_profile: Optional[pulumi.Input['ManagedClusterNATGatewayProfileArgs']] = None,\n network_dataplane: Optional[pulumi.Input[Union[str, 'NetworkDataplane']]] = None,\n network_mode: Optional[pulumi.Input[Union[str, 'NetworkMode']]] = None,\n network_plugin: Optional[pulumi.Input[Union[str, 'NetworkPlugin']]] = None,\n network_plugin_mode: Optional[pulumi.Input[Union[str, 'NetworkPluginMode']]] = None,\n network_policy: Optional[pulumi.Input[Union[str, 'NetworkPolicy']]] = None,\n outbound_type: Optional[pulumi.Input[Union[str, 'OutboundType']]] = None,\n pod_cidr: Optional[pulumi.Input[str]] = None,\n pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n service_cidr: Optional[pulumi.Input[str]] = None,\n service_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if dns_service_ip is None:\n dns_service_ip = '10.0.0.10'\n if dns_service_ip is not None:\n pulumi.set(__self__, \"dns_service_ip\", dns_service_ip)\n if ip_families is not None:\n pulumi.set(__self__, \"ip_families\", ip_families)\n if kube_proxy_config is not None:\n pulumi.set(__self__, \"kube_proxy_config\", kube_proxy_config)\n if load_balancer_profile is not None:\n pulumi.set(__self__, \"load_balancer_profile\", load_balancer_profile)\n if load_balancer_sku is not None:\n pulumi.set(__self__, \"load_balancer_sku\", load_balancer_sku)\n if monitoring is not None:\n pulumi.set(__self__, \"monitoring\", monitoring)\n if nat_gateway_profile is not None:\n pulumi.set(__self__, \"nat_gateway_profile\", nat_gateway_profile)\n if network_dataplane is not None:\n pulumi.set(__self__, \"network_dataplane\", network_dataplane)\n if network_mode is not None:\n pulumi.set(__self__, \"network_mode\", network_mode)\n if network_plugin is not None:\n pulumi.set(__self__, \"network_plugin\", network_plugin)\n if network_plugin_mode is not None:\n pulumi.set(__self__, \"network_plugin_mode\", network_plugin_mode)\n if network_policy is not None:\n pulumi.set(__self__, \"network_policy\", network_policy)\n if outbound_type is None:\n outbound_type = 'loadBalancer'\n if outbound_type is not None:\n pulumi.set(__self__, \"outbound_type\", outbound_type)\n if pod_cidr is None:\n pod_cidr = '10.244.0.0/16'\n if pod_cidr is not None:\n pulumi.set(__self__, \"pod_cidr\", pod_cidr)\n if pod_cidrs is not None:\n pulumi.set(__self__, \"pod_cidrs\", pod_cidrs)\n if service_cidr is None:\n service_cidr = '10.0.0.0/16'\n if service_cidr is not None:\n pulumi.set(__self__, \"service_cidr\", service_cidr)\n if service_cidrs is not None:\n pulumi.set(__self__, \"service_cidrs\", service_cidrs)",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def get_variables(enclosure_name=None):\n variables = enclosure_defaults\n\n # Get enclosure configuration\n if enclosure_name is not None:\n print \"enclosure name: %s\" % enclosure_name\n enclosure_configuration = get_enclosure_configuration(enclosure_name)\n if enclosure_configuration is not None:\n for key in enclosure_configuration:\n variables[key] = enclosure_configuration[key]\n origIP = variables['EM_IP']\n print \"EM_IP is Static: %s.\" % variables['EM_IP']\n variables['EM_IP'] = get_enclosure_manager_ip(variables)\n if variables['EM_IP'] == None:\n variables['EM_IP'] = origIP\n print \"EM_IP is FloatingIp: %s.\" % variables['EM_IP']\n else:\n print \"WARNING: Enclosure '%s' is not known configuration.\" % enclosure_name\n return variables",
"def cidr(self):\n return self._cidr",
"def ipvs_config(self) -> Optional[pulumi.Input['ContainerServiceNetworkProfileIpvsConfigArgs']]:\n return pulumi.get(self, \"ipvs_config\")",
"def get_case_variables_config() -> Any:\n return schemas.CaseVariableConfig.get_variables_config()",
"def get_container_ip(container, network_name=DOCK_NETWORK_NAME):\n\n return str(container.attrs['NetworkSettings']['Networks'][network_name]['IPAddress'])",
"def __init__(__self__, *,\n dns_service_ip: Optional[pulumi.Input[str]] = None,\n load_balancer_profile: Optional[pulumi.Input['LoadBalancerProfileArgs']] = None,\n load_balancer_sku: Optional[pulumi.Input[Union[str, 'LoadBalancerSku']]] = None,\n network_policy: Optional[pulumi.Input[Union[str, 'NetworkPolicy']]] = None,\n pod_cidr: Optional[pulumi.Input[str]] = None,\n pod_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n service_cidr: Optional[pulumi.Input[str]] = None,\n service_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if dns_service_ip is not None:\n pulumi.set(__self__, \"dns_service_ip\", dns_service_ip)\n if load_balancer_profile is not None:\n pulumi.set(__self__, \"load_balancer_profile\", load_balancer_profile)\n if load_balancer_sku is None:\n load_balancer_sku = 'unmanaged'\n if load_balancer_sku is not None:\n pulumi.set(__self__, \"load_balancer_sku\", load_balancer_sku)\n if network_policy is None:\n network_policy = 'calico'\n if network_policy is not None:\n pulumi.set(__self__, \"network_policy\", network_policy)\n if pod_cidr is not None:\n pulumi.set(__self__, \"pod_cidr\", pod_cidr)\n if pod_cidrs is not None:\n pulumi.set(__self__, \"pod_cidrs\", pod_cidrs)\n if service_cidr is not None:\n pulumi.set(__self__, \"service_cidr\", service_cidr)\n if service_cidrs is not None:\n pulumi.set(__self__, \"service_cidrs\", service_cidrs)",
"def dns_service_ip(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_service_ip\")",
"def dns_service_ip(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_service_ip\")",
"def dns_service_ip(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_service_ip\")"
] |
[
"0.770353",
"0.56647307",
"0.56647307",
"0.5606011",
"0.5239462",
"0.5239462",
"0.5239462",
"0.51047766",
"0.50487715",
"0.50297356",
"0.5013917",
"0.49550694",
"0.4856301",
"0.47989824",
"0.47734207",
"0.47114637",
"0.47114637",
"0.4709715",
"0.4709715",
"0.47072223",
"0.47072223",
"0.4695164",
"0.46352014",
"0.46302232",
"0.46266678",
"0.46194252",
"0.46014643",
"0.4575127",
"0.4575127",
"0.4575127"
] |
0.6905621
|
1
|
Obtain the value of pod_cidr, service_cidr, dns_service_ip, docker_bridge_address and network_policy.
|
def get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
self,
) -> Tuple[
Union[str, None],
Union[str, None],
Union[str, None],
Union[str, None],
Union[str, None],
]:
return self._get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
enable_validation=True
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(\n self, enable_validation: bool = False\n ) -> Tuple[\n Union[str, None],\n Union[str, None],\n Union[str, None],\n Union[str, None],\n Union[str, None],\n ]:\n # get network profile from `mc`\n network_profile = None\n if self.mc:\n network_profile = self.mc.network_profile\n\n # pod_cidr\n # read the original value passed by the command\n pod_cidr = self.raw_param.get(\"pod_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n # pod_cidr is allowed to be updated so only read from mc object during creates\n if self.decorator_mode == DecoratorMode.CREATE:\n if network_profile and network_profile.pod_cidr is not None:\n pod_cidr = network_profile.pod_cidr\n\n # service_cidr\n # read the original value passed by the command\n service_cidr = self.raw_param.get(\"service_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if network_profile and network_profile.service_cidr is not None:\n service_cidr = network_profile.service_cidr\n\n # dns_service_ip\n # read the original value passed by the command\n dns_service_ip = self.raw_param.get(\"dns_service_ip\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if network_profile and network_profile.dns_service_ip is not None:\n dns_service_ip = network_profile.dns_service_ip\n\n # network_policy\n # read the original value passed by the command\n network_policy = self.raw_param.get(\"network_policy\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if network_profile and network_profile.network_policy is not None:\n network_policy = network_profile.network_policy\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n network_plugin = self._get_network_plugin(enable_validation=False)\n if not network_plugin:\n if (\n pod_cidr or\n service_cidr or\n dns_service_ip or\n network_policy\n ):\n raise RequiredArgumentMissingError(\n \"Please explicitly specify the network plugin type\"\n )\n return pod_cidr, service_cidr, dns_service_ip, None, network_policy",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def _GetPod(self) -> Dict[str, Any]:\n stdout, _, _ = RunKubectlCommand(['get', 'pod', self.name, '-o', 'yaml'])\n pod = yaml.safe_load(stdout)\n self.ip_address = pod.get('status', {}).get('podIP')\n return pod",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def get_appgw_subnet_cidr(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_CIDR\")\n\n # read the original value passed by the command\n appgw_subnet_cidr = self.raw_param.get(\"appgw_subnet_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None\n ):\n appgw_subnet_cidr = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_cidr",
"def get_pod_cidrs(self) -> Union[List[str], None]:\n # read the original value passed by the command\n pod_cidrs = self.raw_param.get(\"pod_cidrs\")\n # normalize\n pod_cidrs = extract_comma_separated_string(pod_cidrs, keep_none=True, default_value=[])\n # try to read the property value corresponding to the parameter from the `mc` object\n if self.mc and self.mc.network_profile and self.mc.network_profile.pod_cidrs is not None:\n pod_cidrs = self.mc.network_profile.pod_cidrs\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return pod_cidrs",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def network_policy_config(self) -> Optional[pulumi.Input['NetworkPolicyConfigArgs']]:\n return pulumi.get(self, \"network_policy_config\")",
"def getProperties(self, key):\n try:\n key = unpack('>L', inet_aton(key))[0]\n except(error):\n return None\n\n bit = self._MAX_IPV4_BIT\n value = None\n node = self._ROOT_PTR\n\n while(node != self._NULL_PTR):\n if self._treeProperties[node] != None:\n value = self._treeProperties[node]\n\n if (key & bit) != 0:\n node = self._treeRights[node]\n else:\n node = self._treeLefts[node]\n bit >>= 1\n\n return value",
"def network_policy(self) -> 'outputs.NetworkPolicyResponse':\n return pulumi.get(self, \"network_policy\")",
"def _GetIpAddress(self):\n ingress_name = '%s-ingress' % self.name\n get_cmd = [\n 'get', 'ing', ingress_name, '-o',\n 'jsonpath={.status.loadBalancer.ingress[*].ip}'\n ]\n stdout, _, _ = RunKubectlCommand(get_cmd)\n ip_address = stdout\n if ip_address:\n self.ip_address = ip_address",
"def cidr(self):\n return self._cidr",
"def network_configuration(self) -> pulumi.Output['outputs.ServiceNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")",
"def get_container_ip(self, container):\n info = self.inspect_container(container)\n if not info:\n return None\n\n netInfo = info['NetworkSettings']\n if not netInfo:\n return None\n\n ip = netInfo['IPAddress']\n if not ip:\n return None\n\n return ip",
"def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")",
"def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n return self.params.get(value) #will return None if non-existent",
"def ipvs_config(self) -> Optional[pulumi.Input['ContainerServiceNetworkProfileIpvsConfigArgs']]:\n return pulumi.get(self, \"ipvs_config\")",
"def cidr(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cidr\")",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def perform(self, resource_handler):\n instance_id = ast.literal_eval(self.instance_data['instance_id'])['Id']\n info = resource_handler.cli.inspect_container(container=instance_id)\n ip_addresses = []\n for k, v in info['NetworkSettings']['Networks'].items():\n ip_addresses.append(v['IPAddress'])\n return ip_addresses[0]",
"def get_case_variables_config() -> Any:\n return schemas.CaseVariableConfig.get_variables_config()",
"def network_access_policy(self) -> Optional[str]:\n return pulumi.get(self, \"network_access_policy\")",
"def get_cidr_param ( cidr_param ) :\n if cidr_param == 'HBO' :\n cidr_param = hbo_cidr_list\n elif cidr_param == 'HBO-UK' :\n cidr_param = hbo_uk_cidr_list\n elif cidr_param == 'ALL' :\n cidr_param = [ all_ip_cidr ]\n elif cidr_param == 'ESPv4-UAT' :\n cidr_param = [ esp_nonprod[ 'espv4_cidr' ] ]\n elif cidr_param == 'ESPv4-PROD' :\n cidr_param = [ esp_prod[ 'espv4_cidr' ] ]\n else :\n cidr_param = [ cidr_param ]\n\n return cidr_param"
] |
[
"0.6636231",
"0.5917717",
"0.5917717",
"0.5608019",
"0.53910077",
"0.53910077",
"0.53910077",
"0.5227576",
"0.5103053",
"0.5095557",
"0.5095557",
"0.50784636",
"0.50784636",
"0.5055723",
"0.49869743",
"0.4945056",
"0.4938862",
"0.48734993",
"0.48573723",
"0.48123866",
"0.4807148",
"0.47939405",
"0.4787169",
"0.47809142",
"0.47809142",
"0.47439912",
"0.47439912",
"0.47282603",
"0.4727013",
"0.4725805"
] |
0.76270926
|
0
|
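The two rows above show the pattern used throughout these getters: a private `_get_x(enable_validation=...)` that resolves a parameter by falling back from the raw command-line value to the property already set on the `mc` (managed cluster) object, plus a thin public `get_x()` wrapper that simply turns validation on. A minimal sketch of that pattern, using a hypothetical `SimpleContext` class rather than the real azure-cli decorator classes:

from typing import Optional


class SimpleContext:
    def __init__(self, raw_param: dict, mc=None):
        self.raw_param = raw_param
        self.mc = mc  # model object previously fetched from the service, may be None

    def _get_service_cidr(self, enable_validation: bool = False) -> Optional[str]:
        # read the original value passed by the command
        service_cidr = self.raw_param.get("service_cidr")
        # the value already stored on the model object wins, if present
        if self.mc and getattr(self.mc, "service_cidr", None) is not None:
            service_cidr = self.mc.service_cidr
        if enable_validation and service_cidr is not None:
            # placeholder for cross-parameter checks such as the
            # "specify the network plugin type" rule shown above
            pass
        return service_cidr

    def get_service_cidr(self) -> Optional[str]:
        # public entry point: same lookup, validation always on
        return self._get_service_cidr(enable_validation=True)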
Helper function to obtain the constants used by addons.
|
def get_addon_consts(self) -> Dict[str, str]:
from azure.cli.command_modules.acs._consts import (
ADDONS, CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME, CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME, CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME, CONST_ROTATION_POLL_INTERVAL,
CONST_SECRET_ROTATION_ENABLED, CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME)
addon_consts = {}
addon_consts["ADDONS"] = ADDONS
addon_consts[
"CONST_ACC_SGX_QUOTE_HELPER_ENABLED"
] = CONST_ACC_SGX_QUOTE_HELPER_ENABLED
addon_consts[
"CONST_AZURE_POLICY_ADDON_NAME"
] = CONST_AZURE_POLICY_ADDON_NAME
addon_consts["CONST_CONFCOM_ADDON_NAME"] = CONST_CONFCOM_ADDON_NAME
addon_consts[
"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME"
] = CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
addon_consts[
"CONST_INGRESS_APPGW_ADDON_NAME"
] = CONST_INGRESS_APPGW_ADDON_NAME
addon_consts[
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID"
] = CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID
addon_consts[
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME"
] = CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
addon_consts[
"CONST_INGRESS_APPGW_SUBNET_CIDR"
] = CONST_INGRESS_APPGW_SUBNET_CIDR
addon_consts[
"CONST_INGRESS_APPGW_SUBNET_ID"
] = CONST_INGRESS_APPGW_SUBNET_ID
addon_consts[
"CONST_INGRESS_APPGW_WATCH_NAMESPACE"
] = CONST_INGRESS_APPGW_WATCH_NAMESPACE
addon_consts[
"CONST_KUBE_DASHBOARD_ADDON_NAME"
] = CONST_KUBE_DASHBOARD_ADDON_NAME
addon_consts[
"CONST_MONITORING_ADDON_NAME"
] = CONST_MONITORING_ADDON_NAME
addon_consts[
"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID"
] = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
addon_consts[
"CONST_OPEN_SERVICE_MESH_ADDON_NAME"
] = CONST_OPEN_SERVICE_MESH_ADDON_NAME
addon_consts[
"CONST_VIRTUAL_NODE_ADDON_NAME"
] = CONST_VIRTUAL_NODE_ADDON_NAME
addon_consts[
"CONST_VIRTUAL_NODE_SUBNET_NAME"
] = CONST_VIRTUAL_NODE_SUBNET_NAME
addon_consts[
"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME"
] = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
addon_consts[
"CONST_SECRET_ROTATION_ENABLED"
] = CONST_SECRET_ROTATION_ENABLED
addon_consts[
"CONST_ROTATION_POLL_INTERVAL"
] = CONST_ROTATION_POLL_INTERVAL
addon_consts[
"CONST_MONITORING_USING_AAD_MSI_AUTH"
] = CONST_MONITORING_USING_AAD_MSI_AUTH
return addon_consts
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def constants(self):\n return self.bot.constants",
"def constants(self):\n return self._constants",
"def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp",
"def get_defined_constants():\n raise NotImplementedError()",
"def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts",
"def constants(self):\n return self._constants",
"def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12",
"def get_constants():\r\n\r\n dict_constants = dict()\r\n dict_constants['LOGIN_URL'] = \"https://yocket.in/account/login\"\r\n dict_constants['PAST_RESULTS_URL'] = \"https://yocket.in/recent-admits-rejects?page=\"\r\n dict_constants['ALL_RESULTS_URL'] = \"https://yocket.in/profiles/find/matching-admits-and-rejects?page=\"\r\n dict_constants['HOME_PAGE'] = 'https://yocket.in/'\r\n dict_constants['NUMBER_PAGE_TO_SCRAPE_FIRST'] = 1\r\n dict_constants['NUMBER_PAGE_TO_SCRAPE_LAST'] = 2\r\n dict_constants['MINIMUM_GPA'] = 7.5\r\n dict_constants['MINIMUM_GRE'] = 320\r\n dict_constants['MINIMUM_TOEFL'] = 100\r\n\r\n return dict_constants",
"def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]",
"def api_constants():\n constants_body = json.loads(\"{\\\"version\\\": \\\"2.0\\\",\\\"method\\\": \\\"idoit.constants\\\",\\\"params\\\": {\\\"apikey\\\": \\\"\" +\n apikey + \"\\\",\\\"language\\\": \\\"en\\\"},\\\"id\\\": 1}\")\n try:\n s = requests.Session()\n constants_request = s.post(\n api_url, json=constants_body, headers=headers)\n constants = constants_request.json()\n return constants.get(\"result\")\n except requests.exceptions.RequestException:\n print(red + \"\\n>>> \" + reset +\n \"Unable to connect to the API. Please verify the connection information.\\n\")\n return None",
"def get_all_constants():\n return filter(\n lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,\n\n filter( # filter _PRIVATE variables\n lambda x: not x.startswith(\"_\"),\n globals()\n )\n )",
"def consts(consts):\n\n namespace = { }\n\n for c in consts:\n constname = c[\"constname\"]\n consttype = c[\"consttype\"]\n constval = c[\"constval\"]\n\n # Correct various values that won't evaluate in python.\n if constval == \"( SteamItemInstanceID_t ) ~ 0\":\n constval = \"-1\"\n elif constval == \"( ( uint32 ) 'd' << 16U ) | ( ( uint32 ) 'e' << 8U ) | ( uint32 ) 'v'\":\n constval = \"6579574\"\n else:\n constval = re.sub(r\"(0x[0-9a-fA-F]*)ull\", r\"\\1\", constval)\n\n # Evaluate the result, and place it into the namespace.\n value = eval(constval, namespace, namespace)\n namespace[constname] = value\n\n # Generate.\n mapped = map_type(consttype)\n\n if value > 0:\n p(f\"{constname} = {mapped}(0x{value:x})\")\n else:\n p(f\"{constname} = {mapped}({value})\")",
"def get_predefined_constant_names_latex():\n return \"t_0/t_g\", \"t_g\", r\"\\dot{\\varepsilon}\", \\\n \"E_1\", \"E_3\", r\"\\nu_{21}\", r\"\\nu_{31}\"",
"def _getConsts(self, imt):\r\n\r\n if (imt != self._pga and imt != self._pgv and imt != self._sa03 and\r\n imt != self._sa10 and imt != self._sa30):\r\n raise ValueError(\"Invalid IMT \" + str(imt))\r\n c = self._constants[imt]\r\n return (c)",
"def load_constants():\n with open(VARIABLES_DIR / \"constants.yaml\", \"r\", encoding=\"utf-8\") as stream:\n constants = yaml.safe_load(stream)\n\n return constants",
"def get_declarations(self):\n return \"extern const unsigned int %s;\\n\" % self.name",
"def get_definitions(self):\n return \"const unsigned int %s = 0x%xu;\\n\" % (self.name, self.address)",
"def __getConsts(self, imt):\n\n if 'PGA' in imt:\n c = self.__constants['pga']\n c2 = self.__constants2['pga']\n elif 'PGV' in imt:\n c = self.__constants['pgv']\n c2 = self.__constants2['pgv']\n elif 'SA' in imt:\n pp = imt.period\n if pp == 0.3:\n c = self.__constants['psa03']\n c2 = self.__constants2['psa03']\n elif pp == 1.0:\n c = self.__constants['psa10']\n c2 = self.__constants2['psa10']\n elif pp == 3.0:\n c = self.__constants['psa30']\n c2 = self.__constants2['psa30']\n else:\n raise ValueError(\"Unknown SA period: %f\" % pp)\n else:\n raise ValueError(\"Unknown IMT %r\" % imt)\n return (c, c2)",
"def getConstant(self):\n return _libsbml.Compartment_getConstant(self)",
"def load_constants():\r\n marker_dictionary = dict()\r\n marker_dictionary[\"SP\"] = SP\r\n marker_dictionary[\"LCL\"] = LCL\r\n marker_dictionary[\"ARG\"] = ARG\r\n marker_dictionary[\"THIS\"] = THIS\r\n marker_dictionary[\"THAT\"] = THAT\r\n marker_dictionary[\"SCREEN\"] = SCREEN\r\n marker_dictionary[\"KBD\"] = KBD\r\n for i in range(0, RAM_RESERVE_END):\r\n marker_dictionary[\"R\"+str(i)] = i\r\n return marker_dictionary",
"def constant(self):\n return self.__constant",
"def constant(self):\n return self.__constant",
"def constant(self):\n return self.__constant",
"def addons_config():\n # type () -> dict\n addons_json_path = 'addons.json'\n addons_json_path = os.path.join(_HERE, addons_json_path)\n with open(addons_json_path, encoding='utf-8') as addons_json:\n return json.load(addons_json)",
"def get_constants(prefix):\n return {\n getattr(socket, n): n\n for n in dir(socket)\n if n.startswith(prefix)\n }",
"def get_constants(self) -> CWConstants:\n self.serial.write(b\"M!\")\n values = self.__read_response(1)[0]\n assert values[0:2] == b\"!M\"\n v = values[2:]\n\n zener_voltage = (256 * v[1] + v[2]) / 100\n ldr_max_resistance = 256 * v[3] + v[4]\n ldr_pull_up_resistance = (256 * v[5] + v[6]) / 10\n rain_beta = 256 * v[7] + v[8]\n rain_res_at_25 = (256 * v[9] + v[10]) / 10\n rain_pull_up_resistance = (256 * v[11] + v[12]) / 10\n\n return CWConstants(\n zener_voltage=zener_voltage,\n ldr_max_resistance=ldr_max_resistance,\n ldr_pull_up_resistance=ldr_pull_up_resistance,\n rain_beta=rain_beta,\n rain_res_at_25=rain_res_at_25,\n rain_pull_up_resistance=rain_pull_up_resistance,\n )",
"def get_test_modules_names() -> typing.List[str]:\n\n from services.meter.tests.unit import constants_for_tests\n return constants_for_tests.TESTS_MODULES",
"def test_created():\n assert len(dir(constants)) > 300\n assert hasattr(constants, \"Planck_constant\") == True",
"def __get_common(attribute):\n return common.API_COMMON_CONFIG[attribute]",
"def getConstant(self):\n return _libsbml.Species_getConstant(self)"
] |
[
"0.72575116",
"0.7246178",
"0.70951766",
"0.7016354",
"0.68816453",
"0.68795377",
"0.64245206",
"0.6304636",
"0.62934524",
"0.5997974",
"0.5880374",
"0.5861224",
"0.5843757",
"0.58403367",
"0.57400537",
"0.5736622",
"0.5733507",
"0.57293123",
"0.5697053",
"0.5657399",
"0.56336623",
"0.56336623",
"0.56336623",
"0.56288034",
"0.56182647",
"0.561403",
"0.555579",
"0.55317706",
"0.5513723",
"0.54668045"
] |
0.7898684
|
0
|
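`get_addon_consts` centralizes the addon constant imports so the individual getters can look constants up by name instead of importing from `_consts` themselves. An illustrative consumer (the `ctx` parameter is a hypothetical stand-in for the decorator context object whose method is shown above):

def lookup_monitoring_keys(ctx):
    # ctx is any object exposing get_addon_consts(); the two keys below are
    # ones the method above actually populates.
    addon_consts = ctx.get_addon_consts()
    addon_name = addon_consts.get("CONST_MONITORING_ADDON_NAME")
    workspace_key = addon_consts.get("CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID")
    return addon_name, workspace_key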
Internal function to obtain the value of enable_addons.
|
def _get_enable_addons(self, enable_validation: bool = False) -> List[str]:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    valid_addon_keys = addon_consts.get("ADDONS").keys()

    # read the original value passed by the command
    enable_addons = self.raw_param.get("enable_addons")

    # normalize
    enable_addons = enable_addons.split(',') if enable_addons else []

    # validation
    if enable_validation:
        # check duplicate addons
        duplicate_addons_set = {
            x for x in enable_addons if enable_addons.count(x) >= 2
        }
        if len(duplicate_addons_set) != 0:
            raise InvalidArgumentValueError(
                "Duplicate addon{} '{}' found in option --enable-addons.".format(
                    "s" if len(duplicate_addons_set) > 1 else "",
                    ",".join(duplicate_addons_set),
                )
            )

        # check unrecognized addons
        enable_addons_set = set(enable_addons)
        invalid_addons_set = enable_addons_set.difference(valid_addon_keys)
        if len(invalid_addons_set) != 0:
            raise InvalidArgumentValueError(
                "'{}' {} not recognized by the --enable-addons argument.".format(
                    ",".join(invalid_addons_set),
                    "are" if len(invalid_addons_set) > 1 else "is",
                )
            )

        # check monitoring/workspace_resource_id
        workspace_resource_id = self._get_workspace_resource_id(read_only=True)
        if "monitoring" not in enable_addons and workspace_resource_id:
            raise RequiredArgumentMissingError(
                '"--workspace-resource-id" requires "--enable-addons monitoring".')

        # check virtual node/aci_subnet_name/vnet_subnet_id
        # Note: The external parameters involved in the validation are not verified in their own getters.
        aci_subnet_name = self.get_aci_subnet_name()
        vnet_subnet_id = self.get_vnet_subnet_id()
        if "virtual-node" in enable_addons and not (aci_subnet_name and vnet_subnet_id):
            raise RequiredArgumentMissingError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
    return enable_addons
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_enable_addons(self) -> List[str]:\n\n return self._get_enable_addons(enable_validation=True)",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def addons_config(self) -> 'outputs.AddonsConfigResponse':\n return pulumi.get(self, \"addons_config\")",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def get_addons_info(hass: HomeAssistant) -> dict[str, dict[str, Any]] | None:\n return hass.data.get(DATA_ADDONS_INFO)",
"def get_addons_stats(hass):\n return hass.data.get(DATA_ADDONS_STATS)",
"async def get_addons(self, only_installed=True):\n try:\n result = await self.send_command(COMMAND_GET_ADDONS, method=\"get\")\n\n addons = result.get(\"data\", {}).get(\"addons\")\n if addons is None:\n raise HassioAPIError(\"No addons were returned.\")\n\n if only_installed:\n return [addon for addon in addons if addon[\"installed\"]]\n return addons\n\n except HassioAPIError as err:\n _LOGGER.error(\"Failed to retrieve addons: %s\", err)\n\n return None",
"async def _async_get_addon_config(self):\n addon_info = await self._async_get_addon_info()\n return addon_info[\"options\"]",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def getAddon(self, addonName):\r\n return self.__getitem__(addonName)",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def get_enabledAtPowerOn(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.ENABLEDATPOWERON_INVALID\n res = self._enabledAtPowerOn\n return res",
"def addons_config():\n # type () -> dict\n addons_json_path = 'addons.json'\n addons_json_path = os.path.join(_HERE, addons_json_path)\n with open(addons_json_path, encoding='utf-8') as addons_json:\n return json.load(addons_json)",
"def get_enabled(self, channel):\n return self.extension_names - self.get_disabled(channel)",
"def get_enabled_plugins(self):\n return self._enabled_plugins",
"def enabled(self):\n return self._get('enabled')",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def vnet_addons(self) -> Optional[pulumi.Input['ServiceVNetAddonsArgs']]:\n return pulumi.get(self, \"vnet_addons\")",
"def add_addons(self):\n pass",
"def get_enable_sgxquotehelper(self) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\")\n\n # read the original value passed by the command\n enable_sgxquotehelper = self.raw_param.get(\"enable_sgxquotehelper\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None\n ):\n enable_sgxquotehelper = self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_sgxquotehelper",
"def get_enable_interval(self):\n return self.quad_enable_interval",
"def get_isenabled(self):\n return self.isenabled",
"def ext_attribute_enabled(self) -> bool:\n return pulumi.get(self, \"ext_attribute_enabled\")",
"def vnet_addons(self) -> Optional[pulumi.Input['AppVNetAddonsArgs']]:\n return pulumi.get(self, \"vnet_addons\")",
"def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)",
"def is_enabled(self):",
"def enabled_modules(self):\n return [scomp for scomp in self.modules()\n if getattr(scomp, 'enabled', True)]",
"def extensions_enabled(self) -> bool:\n return pulumi.get(self, \"extensions_enabled\")",
"def get_enabled_modules(self):\n return self._gconf.get_enabled_modules()"
] |
[
"0.7205903",
"0.6804676",
"0.6804676",
"0.64908105",
"0.62050027",
"0.6193101",
"0.6038516",
"0.60257065",
"0.58602744",
"0.58555007",
"0.58165956",
"0.580858",
"0.5798054",
"0.57953113",
"0.5770367",
"0.5722522",
"0.57016325",
"0.5659138",
"0.56360584",
"0.5620025",
"0.56167394",
"0.553695",
"0.55299497",
"0.5496665",
"0.5489848",
"0.54827386",
"0.5452901",
"0.5452208",
"0.54230225",
"0.541893"
] |
0.70253557
|
1
|
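Note the call to `self._get_workspace_resource_id(read_only=True)` in the validation block above: `_get_workspace_resource_id` (shown further down) in turn calls `_get_enable_addons(enable_validation=False)`, so the `read_only` and `enable_validation=False` flags are what keep the two getters from recursing into each other. A self-contained sketch of that cycle-breaking idiom, with a hypothetical `Ctx` class and simplified checks:

class Ctx:
    def __init__(self, raw_param: dict):
        self.raw_param = raw_param

    def _get_enable_addons(self, enable_validation: bool = False) -> list:
        addons = (self.raw_param.get("enable_addons") or "").split(",")
        addons = [a for a in addons if a]
        if enable_validation:
            # peek at the other parameter without triggering its own validation
            ws_id = self._get_workspace_resource_id(read_only=True)
            if ws_id and "monitoring" not in addons:
                raise ValueError('"--workspace-resource-id" requires "--enable-addons monitoring".')
        return addons

    def _get_workspace_resource_id(self, enable_validation: bool = False, read_only: bool = False):
        ws_id = self.raw_param.get("workspace_resource_id")
        if read_only:
            # skip completion and validation: safe to call from other getters
            return ws_id
        if enable_validation:
            # validate against addons without re-entering this getter
            addons = self._get_enable_addons(enable_validation=False)
            if ws_id and "monitoring" not in addons:
                raise ValueError('"--workspace-resource-id" requires "--enable-addons monitoring".')
        return ws_id


ctx = Ctx({"enable_addons": "monitoring", "workspace_resource_id": "/sub/x"})
assert ctx._get_enable_addons(enable_validation=True) == ["monitoring"]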
Obtain the value of enable_addons.
|
def get_enable_addons(self) -> List[str]:
return self._get_enable_addons(enable_validation=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def addons_config(self) -> 'outputs.AddonsConfigResponse':\n return pulumi.get(self, \"addons_config\")",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def _get_enable_addons(self, enable_validation: bool = False) -> List[str]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n valid_addon_keys = addon_consts.get(\"ADDONS\").keys()\n\n # read the original value passed by the command\n enable_addons = self.raw_param.get(\"enable_addons\")\n\n # normalize\n enable_addons = enable_addons.split(',') if enable_addons else []\n\n # validation\n if enable_validation:\n # check duplicate addons\n duplicate_addons_set = {\n x for x in enable_addons if enable_addons.count(x) >= 2\n }\n if len(duplicate_addons_set) != 0:\n raise InvalidArgumentValueError(\n \"Duplicate addon{} '{}' found in option --enable-addons.\".format(\n \"s\" if len(duplicate_addons_set) > 1 else \"\",\n \",\".join(duplicate_addons_set),\n )\n )\n\n # check unrecognized addons\n enable_addons_set = set(enable_addons)\n invalid_addons_set = enable_addons_set.difference(valid_addon_keys)\n if len(invalid_addons_set) != 0:\n raise InvalidArgumentValueError(\n \"'{}' {} not recognized by the --enable-addons argument.\".format(\n \",\".join(invalid_addons_set),\n \"are\" if len(invalid_addons_set) > 1 else \"is\",\n )\n )\n\n # check monitoring/workspace_resource_id\n workspace_resource_id = self._get_workspace_resource_id(read_only=True)\n if \"monitoring\" not in enable_addons and workspace_resource_id:\n raise RequiredArgumentMissingError(\n '\"--workspace-resource-id\" requires \"--enable-addons monitoring\".')\n\n # check virtual node/aci_subnet_name/vnet_subnet_id\n # Note: The external parameters involved in the validation are not verified in their own getters.\n aci_subnet_name = self.get_aci_subnet_name()\n vnet_subnet_id = self.get_vnet_subnet_id()\n if \"virtual-node\" in enable_addons and not (aci_subnet_name and vnet_subnet_id):\n raise RequiredArgumentMissingError(\n '\"--enable-addons virtual-node\" requires \"--aci-subnet-name\" and \"--vnet-subnet-id\".')\n return enable_addons",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def enabled(self):\n return self._get('enabled')",
"def get_addons_stats(hass):\n return hass.data.get(DATA_ADDONS_STATS)",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def get_addons_info(hass: HomeAssistant) -> dict[str, dict[str, Any]] | None:\n return hass.data.get(DATA_ADDONS_INFO)",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"async def _async_get_addon_config(self):\n addon_info = await self._async_get_addon_info()\n return addon_info[\"options\"]",
"async def get_addons(self, only_installed=True):\n try:\n result = await self.send_command(COMMAND_GET_ADDONS, method=\"get\")\n\n addons = result.get(\"data\", {}).get(\"addons\")\n if addons is None:\n raise HassioAPIError(\"No addons were returned.\")\n\n if only_installed:\n return [addon for addon in addons if addon[\"installed\"]]\n return addons\n\n except HassioAPIError as err:\n _LOGGER.error(\"Failed to retrieve addons: %s\", err)\n\n return None",
"def get_enabledAtPowerOn(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.ENABLEDATPOWERON_INVALID\n res = self._enabledAtPowerOn\n return res",
"def get_isenabled(self):\n return self.isenabled",
"def addons_config():\n # type () -> dict\n addons_json_path = 'addons.json'\n addons_json_path = os.path.join(_HERE, addons_json_path)\n with open(addons_json_path, encoding='utf-8') as addons_json:\n return json.load(addons_json)",
"def Enabled(self):\n return self._get_attribute('enabled')",
"def get_enabled_plugins(self):\n return self._enabled_plugins",
"def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)",
"def getAddon(self, addonName):\r\n return self.__getitem__(addonName)",
"def EM_advanced_enabled(self):\n\n state = ct.c_int()\n self.lib.GetEMAdvanced(ct.pointer(state))\n return state.value",
"def vnet_addons(self) -> Optional[pulumi.Input['ServiceVNetAddonsArgs']]:\n return pulumi.get(self, \"vnet_addons\")",
"def get_enable_interval(self):\n return self.quad_enable_interval",
"def get_enabled(self, channel):\n return self.extension_names - self.get_disabled(channel)",
"def enabled(self):\n return self.__enabled",
"def getEnabled( self, cCtrlName ):\n\n return self.getControlModelProperty( cCtrlName, \"Enabled\" )",
"def vnet_addons(self) -> Optional[pulumi.Input['AppVNetAddonsArgs']]:\n return pulumi.get(self, \"vnet_addons\")",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled"
] |
[
"0.68291336",
"0.677199",
"0.677199",
"0.6652646",
"0.6496308",
"0.62080246",
"0.6165309",
"0.6142399",
"0.6116278",
"0.6077121",
"0.6076746",
"0.60332817",
"0.5964098",
"0.5953141",
"0.59218335",
"0.5914245",
"0.58532643",
"0.5850154",
"0.5836041",
"0.57907563",
"0.57693744",
"0.57405496",
"0.5728079",
"0.5709584",
"0.5691021",
"0.56524247",
"0.5644092",
"0.5644092",
"0.5644092",
"0.5644092"
] |
0.7205488
|
0
|
Internal function to dynamically obtain the value of workspace_resource_id according to the context. When workspace_resource_id is not assigned, dynamic completion will be triggered. Function "ensure_default_log_analytics_workspace_for_monitoring" will be called to create a workspace with subscription_id and resource_group_name, which internally uses ResourceManagementClient to send the request. This function supports the option of enable_validation. When enabled, it will check if workspace_resource_id is assigned but 'monitoring' is not specified in enable_addons; if so, it raises a RequiredArgumentMissingError. This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
|
def _get_workspace_resource_id(
    self, enable_validation: bool = False, read_only: bool = False
) -> Union[str, None]:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME")
    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get(
        "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID"
    )

    # read the original value passed by the command
    workspace_resource_id = self.raw_param.get("workspace_resource_id")
    # try to read the property value corresponding to the parameter from the `mc` object
    read_from_mc = False
    if (
        self.mc and
        self.mc.addon_profiles and
        CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
        self.mc.addon_profiles.get(
            CONST_MONITORING_ADDON_NAME
        ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID) is not None
    ):
        workspace_resource_id = self.mc.addon_profiles.get(
            CONST_MONITORING_ADDON_NAME
        ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID)
        read_from_mc = True

    # skip dynamic completion & validation if option read_only is specified
    if read_only:
        return workspace_resource_id

    # dynamic completion
    if not read_from_mc:
        if workspace_resource_id is None:
            # use default workspace if exists else create default workspace
            workspace_resource_id = (
                self.external_functions.ensure_default_log_analytics_workspace_for_monitoring(
                    self.cmd,
                    self.get_subscription_id(),
                    self.get_resource_group_name(),
                )
            )
        # normalize
        workspace_resource_id = "/" + workspace_resource_id.strip(" /")

    # validation
    if enable_validation:
        enable_addons = self._get_enable_addons(enable_validation=False)
        if workspace_resource_id and "monitoring" not in enable_addons:
            raise RequiredArgumentMissingError(
                '"--workspace-resource-id" requires "--enable-addons monitoring".')

    # this parameter does not need validation
    return workspace_resource_id
|
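The `# normalize` step in the function above is worth a quick illustration: `strip(" /")` removes any leading and trailing spaces and slashes, and exactly one leading "/" is re-added, so user input with or without a leading slash ends up in the same canonical form. A small check of that behavior (the resource-id strings are made up for illustration):

def normalize_resource_id(workspace_resource_id: str) -> str:
    # same expression as the normalization line in the getter above
    return "/" + workspace_resource_id.strip(" /")


assert normalize_resource_id("subscriptions/s/rg/x/") == "/subscriptions/s/rg/x"
assert normalize_resource_id("/subscriptions/s/rg/x") == "/subscriptions/s/rg/x"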
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def log_analytics_workspace_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"log_analytics_workspace_resource_id\")",
"def get_workspace_resource_id(self) -> Union[str, None]:\n return self._get_workspace_resource_id(enable_validation=True)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def get_workspace_id() -> str:\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n # Get organization\n organization = Variable.get(AirflowVars.TERRAFORM_ORGANIZATION)\n\n # Get workspace\n environment = Variable.get(AirflowVars.ENVIRONMENT)\n workspace = TerraformConfig.WORKSPACE_PREFIX + environment\n\n # Get workspace ID\n workspace_id = terraform_api.workspace_id(organization, workspace)\n\n return workspace_id",
"def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")",
"def __init__(__self__, *,\n log_analytics_workspace_resource_id: Optional[pulumi.Input[str]] = None,\n security_monitoring: Optional[pulumi.Input['ManagedClusterSecurityProfileDefenderSecurityMonitoringArgs']] = None):\n if log_analytics_workspace_resource_id is not None:\n pulumi.set(__self__, \"log_analytics_workspace_resource_id\", log_analytics_workspace_resource_id)\n if security_monitoring is not None:\n pulumi.set(__self__, \"security_monitoring\", security_monitoring)",
"def workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_id\")",
"def synapse_workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def synapse_workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n admin_role_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allowed_organizations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n editor_role_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n email_assertion: Optional[pulumi.Input[str]] = None,\n groups_assertion: Optional[pulumi.Input[str]] = None,\n idp_metadata_url: Optional[pulumi.Input[str]] = None,\n idp_metadata_xml: Optional[pulumi.Input[str]] = None,\n login_assertion: Optional[pulumi.Input[str]] = None,\n login_validity_duration: Optional[pulumi.Input[int]] = None,\n name_assertion: Optional[pulumi.Input[str]] = None,\n org_assertion: Optional[pulumi.Input[str]] = None,\n role_assertion: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'WorkspaceSamlConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceSamlConfigurationState.__new__(_WorkspaceSamlConfigurationState)\n\n __props__.__dict__[\"admin_role_values\"] = admin_role_values\n __props__.__dict__[\"allowed_organizations\"] = allowed_organizations\n __props__.__dict__[\"editor_role_values\"] = editor_role_values\n __props__.__dict__[\"email_assertion\"] = email_assertion\n __props__.__dict__[\"groups_assertion\"] = groups_assertion\n __props__.__dict__[\"idp_metadata_url\"] = idp_metadata_url\n __props__.__dict__[\"idp_metadata_xml\"] = idp_metadata_xml\n __props__.__dict__[\"login_assertion\"] = login_assertion\n __props__.__dict__[\"login_validity_duration\"] = login_validity_duration\n __props__.__dict__[\"name_assertion\"] = name_assertion\n __props__.__dict__[\"org_assertion\"] = org_assertion\n __props__.__dict__[\"role_assertion\"] = role_assertion\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return WorkspaceSamlConfiguration(resource_name, opts=opts, __props__=__props__)",
"def workspace_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_security_group_id\")",
"def get_workspace(self):\n\n # Our AML config file\n with open(\"/usr/src/api/config.json\", \"r\") as json_file:\n config_data = json.load(json_file)\n\n # Let's connect to our workspace\n sp = ServicePrincipalAuthentication(tenant_id=config_data['tenant_id'], # tenantID\n service_principal_id=config_data['service_principal_id'], # clientId\n service_principal_password=config_data[\n 'service_principal_password']) # clientSecret\n\n ws = Workspace.get(name=config_data['workspace_name'],\n auth=sp,\n subscription_id=config_data['subscription_id'],\n resource_group=config_data['resource_group'])\n\n return ws",
"def synapse_workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def workspace_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_security_group_id\")",
"def get_workspace(self, user_id=None, alias=None, unique_id=None, include_deleted=False):\n # Get UUID for workspace\n if alias == 'default_workspace':\n unique_id = 'default_workspace'\n else:\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n status = self.include_status[:]\n if include_deleted:\n status.append('deleted')\n if not unique_id:\n unique_id = uuid_mapping.get_uuid(alias, user_id, status=status)\n if not unique_id:\n return False\n # return matching workspace \n self._logger.debug('Getting workspace \"{}\" with alias \"{}\"'.format(unique_id, alias)) \n \n return self.workspaces.get(unique_id, None)",
"def get_ws():\n tenant = os.environ.get('TENANT')\n if tenant:\n auth = InteractiveLoginAuthentication(tenant_id = tenant)\n ws = Workspace.from_config(auth = auth)\n else:\n ws = Workspace.from_config()\n return ws",
"def _system_job_workspaces(job):\n workspaces = {}\n data = job.get_input_data()\n\n # Configure ingest workspace based on input data values\n if job.job_type.name == 'scale-ingest':\n workspace_name = None\n new_workspace_name = None\n if 'workspace' in data.values:\n workspace_name = data.values['workspace'].value\n if 'new_workspace' in data.values:\n new_workspace_name = data.values['new_workspace'].value\n else:\n # Old ingest jobs do not have the workspace(s) in their data, will need to query ingest model\n if 'ingest_id' in data.values:\n ingest_id = data.values['ingest_id'].value\n from ingest.models import Ingest\n ingest = Ingest.objects.select_related('workspace', 'new_workspace').get(id=ingest_id)\n workspace_name = ingest.workspace.name\n if ingest.new_workspace:\n new_workspace_name = ingest.new_workspace.name\n if workspace_name:\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n if new_workspace_name:\n workspaces[new_workspace_name] = TaskWorkspace(new_workspace_name, MODE_RW)\n\n # Configure Strike workspace based on current configuration\n if job.job_type.name == 'scale-strike':\n strike_id = data.values['STRIKE_ID'].value\n from ingest.models import Strike\n strike = Strike.objects.get(id=strike_id)\n workspace_name = strike.get_strike_configuration().get_workspace()\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n\n # Configure Scan workspace based on current configuration\n if job.job_type.name == 'scale-scan':\n scan_id = data.values['SCAN_ID'].value\n from ingest.models import Scan\n scan = Scan.objects.get(id=scan_id)\n workspace_name = scan.get_scan_configuration().get_workspace()\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n\n # Configure Scale Delete Files workspaces based on input workspaces\n if job.job_type.name == 'scale-delete-files':\n import json\n wrkspc_list = json.loads(data.get_property_values(['workspaces'])['workspaces'])\n\n workspaces = {w_name: TaskWorkspace(w_name, MODE_RW) for d in wrkspc_list for w_name, _v in d.items()}\n\n return workspaces",
"def test_default_isolated_workspace():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD, server_ping=False, isolated_workspace=True)\n context1 = rally.contextHelper.currentContext()\n workspace = rally.getWorkspace()\n project = rally.getProject()\n context2 = rally.contextHelper.currentContext()\n assert context1 == context2\n assert context1.workspace == DEFAULT_WORKSPACE\n assert workspace.Name == DEFAULT_WORKSPACE\n assert context1.project == DEFAULT_PROJECT\n assert project.Name == DEFAULT_PROJECT\n url = makeResourceUrl(rally, 'Defect')\n #print(url)\n expected_workspace_clause = 'workspace=workspace/%s' % str(workspace.oid)\n assert expected_workspace_clause in url\n\n problem_text = 'No reset of of the Workspace is permitted when the isolated_workspace option is specified'\n with py.test.raises(Exception) as excinfo:\n rally.setWorkspace(ALTERNATE_WORKSPACE)\n actualErrVerbiage = excinfo.value.args[0] \n assert excinfo.value.__class__.__name__ == 'RallyRESTAPIError'\n assert actualErrVerbiage == problem_text",
"def workspace_exists(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"workspace_exists\", data, \"exists\")",
"def create_workspace(client, workspace, context_name):\n data = {\"workspace\": workspace, \"context\": context_name}\n return client._creoson_post(\"windchill\", \"create_workspace\", data)",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...",
"def test___init___options_workspace(\n self, runway_context: MockRunwayContext, tmp_path: Path\n ) -> None:\n options = {\"terraform_workspace\": \"default\"}\n obj = Terraform(runway_context, module_root=tmp_path, options=options)\n assert obj.required_workspace == options[\"terraform_workspace\"]",
"def _before(self, *args, **kw):\n super(self.__class__, self)._before(args, kw)\n\n workspace_api = WorkspaceApi(tg.tmpl_context.current_user)\n workspace_id = tg.request.controller_state.routing_args.get('workspace_id')\n workspace = workspace_api.get_one(workspace_id)\n tg.tmpl_context.workspace_id = workspace_id\n tg.tmpl_context.workspace = workspace"
] |
[
"0.6765404",
"0.66471523",
"0.5830044",
"0.5674615",
"0.5674615",
"0.56254786",
"0.56119746",
"0.5346999",
"0.5292099",
"0.52377284",
"0.5199666",
"0.51216084",
"0.51216084",
"0.5087775",
"0.5087775",
"0.4935949",
"0.4886711",
"0.48856503",
"0.48505947",
"0.477355",
"0.47114563",
"0.46507993",
"0.4592206",
"0.458914",
"0.4574156",
"0.45361403",
"0.45305637",
"0.4510726",
"0.44935998",
"0.4482417"
] |
0.75189555
|
0
|
Dynamically obtain the value of workspace_resource_id according to the context. When workspace_resource_id is not assigned, dynamic completion will be triggered. The function "ensure_default_log_analytics_workspace_for_monitoring" will be called to create a workspace with subscription_id and resource_group_name, which internally uses ResourceManagementClient to send the request.
|
def get_workspace_resource_id(self) -> Union[str, None]:
return self._get_workspace_resource_id(enable_validation=True)
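
A minimal standalone sketch of the fallback chain the query describes, under stated assumptions: ensure_default_workspace and resolve_workspace_resource_id below are hypothetical stand-ins, not the real Azure CLI helpers (the real one sends a request through ResourceManagementClient).

from typing import Optional


def ensure_default_workspace(subscription_id: str, resource_group_name: str) -> str:
    # Hypothetical placeholder for ensure_default_log_analytics_workspace_for_monitoring;
    # it only builds a plausible resource ID instead of sending a request.
    return (
        f"subscriptions/{subscription_id}/resourcegroups/{resource_group_name}"
        f"/providers/microsoft.operationalinsights/workspaces/DefaultWorkspace"
    )


def resolve_workspace_resource_id(
    raw_value: Optional[str],
    subscription_id: str,
    resource_group_name: str,
) -> str:
    # Dynamic completion: only runs when the caller did not supply a value.
    if raw_value is None:
        raw_value = ensure_default_workspace(subscription_id, resource_group_name)
    # Normalize to a single leading slash, mirroring the real getter.
    return "/" + raw_value.strip(" /")


print(resolve_workspace_resource_id(None, "sub-id", "rg"))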
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def log_analytics_workspace_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"log_analytics_workspace_resource_id\")",
"def _get_workspace_resource_id(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_MONITORING_ADDON_NAME = addon_consts.get(\"CONST_MONITORING_ADDON_NAME\")\n CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get(\n \"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID\"\n )\n\n # read the original value passed by the command\n workspace_resource_id = self.raw_param.get(\"workspace_resource_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n read_from_mc = False\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_MONITORING_ADDON_NAME\n ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID) is not None\n ):\n workspace_resource_id = self.mc.addon_profiles.get(\n CONST_MONITORING_ADDON_NAME\n ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID)\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return workspace_resource_id\n\n # dynamic completion\n if not read_from_mc:\n if workspace_resource_id is None:\n # use default workspace if exists else create default workspace\n workspace_resource_id = (\n self.external_functions.ensure_default_log_analytics_workspace_for_monitoring(\n self.cmd,\n self.get_subscription_id(),\n self.get_resource_group_name(),\n )\n )\n # normalize\n workspace_resource_id = \"/\" + workspace_resource_id.strip(\" /\")\n\n # validation\n if enable_validation:\n enable_addons = self._get_enable_addons(enable_validation=False)\n if workspace_resource_id and \"monitoring\" not in enable_addons:\n raise RequiredArgumentMissingError(\n '\"--workspace-resource-id\" requires \"--enable-addons monitoring\".')\n\n # this parameter does not need validation\n return workspace_resource_id",
"def get_workspace_id() -> str:\n token = BaseHook.get_connection(AirflowConns.TERRAFORM).password\n terraform_api = TerraformApi(token)\n\n # Get organization\n organization = Variable.get(AirflowVars.TERRAFORM_ORGANIZATION)\n\n # Get workspace\n environment = Variable.get(AirflowVars.ENVIRONMENT)\n workspace = TerraformConfig.WORKSPACE_PREFIX + environment\n\n # Get workspace ID\n workspace_id = terraform_api.workspace_id(organization, workspace)\n\n return workspace_id",
"def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")",
"def workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_id\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)",
"def synapse_workspace_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def synapse_workspace_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def synapse_workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"synapse_workspace_id\")",
"def create_workspace(client, workspace, context_name):\n data = {\"workspace\": workspace, \"context\": context_name}\n return client._creoson_post(\"windchill\", \"create_workspace\", data)",
"def __init__(__self__, *,\n log_analytics_workspace_resource_id: Optional[pulumi.Input[str]] = None,\n security_monitoring: Optional[pulumi.Input['ManagedClusterSecurityProfileDefenderSecurityMonitoringArgs']] = None):\n if log_analytics_workspace_resource_id is not None:\n pulumi.set(__self__, \"log_analytics_workspace_resource_id\", log_analytics_workspace_resource_id)\n if security_monitoring is not None:\n pulumi.set(__self__, \"security_monitoring\", security_monitoring)",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")",
"def workspace_security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"workspace_security_group_id\")",
"def get_workspace(self):\n\n # Our AML config file\n with open(\"/usr/src/api/config.json\", \"r\") as json_file:\n config_data = json.load(json_file)\n\n # Let's connect to our workspace\n sp = ServicePrincipalAuthentication(tenant_id=config_data['tenant_id'], # tenantID\n service_principal_id=config_data['service_principal_id'], # clientId\n service_principal_password=config_data[\n 'service_principal_password']) # clientSecret\n\n ws = Workspace.get(name=config_data['workspace_name'],\n auth=sp,\n subscription_id=config_data['subscription_id'],\n resource_group=config_data['resource_group'])\n\n return ws",
"def workspace_exists(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"workspace_exists\", data, \"exists\")",
"def create_workspace(self, **data):\n payload = data\n payload.update({\n '@type': 'opengever.workspace.workspace'\n })\n\n return self.request.post('/workspaces', json=payload).json()",
"def _system_job_workspaces(job):\n workspaces = {}\n data = job.get_input_data()\n\n # Configure ingest workspace based on input data values\n if job.job_type.name == 'scale-ingest':\n workspace_name = None\n new_workspace_name = None\n if 'workspace' in data.values:\n workspace_name = data.values['workspace'].value\n if 'new_workspace' in data.values:\n new_workspace_name = data.values['new_workspace'].value\n else:\n # Old ingest jobs do not have the workspace(s) in their data, will need to query ingest model\n if 'ingest_id' in data.values:\n ingest_id = data.values['ingest_id'].value\n from ingest.models import Ingest\n ingest = Ingest.objects.select_related('workspace', 'new_workspace').get(id=ingest_id)\n workspace_name = ingest.workspace.name\n if ingest.new_workspace:\n new_workspace_name = ingest.new_workspace.name\n if workspace_name:\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n if new_workspace_name:\n workspaces[new_workspace_name] = TaskWorkspace(new_workspace_name, MODE_RW)\n\n # Configure Strike workspace based on current configuration\n if job.job_type.name == 'scale-strike':\n strike_id = data.values['STRIKE_ID'].value\n from ingest.models import Strike\n strike = Strike.objects.get(id=strike_id)\n workspace_name = strike.get_strike_configuration().get_workspace()\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n\n # Configure Scan workspace based on current configuration\n if job.job_type.name == 'scale-scan':\n scan_id = data.values['SCAN_ID'].value\n from ingest.models import Scan\n scan = Scan.objects.get(id=scan_id)\n workspace_name = scan.get_scan_configuration().get_workspace()\n workspaces[workspace_name] = TaskWorkspace(workspace_name, MODE_RW)\n\n # Configure Scale Delete Files workspaces based on input workspaces\n if job.job_type.name == 'scale-delete-files':\n import json\n wrkspc_list = json.loads(data.get_property_values(['workspaces'])['workspaces'])\n\n workspaces = {w_name: TaskWorkspace(w_name, MODE_RW) for d in wrkspc_list for w_name, _v in d.items()}\n\n return workspaces",
"def test_default_context():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD, server_ping=False)\n context1 = rally.contextHelper.currentContext()\n workspace = rally.getWorkspace()\n project = rally.getProject()\n context2 = rally.contextHelper.currentContext()\n assert context1 == context2\n assert context1.workspace == DEFAULT_WORKSPACE\n assert workspace.Name == DEFAULT_WORKSPACE\n assert context1.project == DEFAULT_PROJECT\n assert project.Name == DEFAULT_PROJECT\n url = makeResourceUrl(rally, 'Defect')\n #print(url)\n expected_workspace_clause = 'workspace=workspace/%s' % str(workspace.oid)\n assert expected_workspace_clause in url\n expected_project_clause = 'project=project/%s' % str(project.oid)\n assert expected_project_clause in url",
"def get_workspace(client):\n return client._creoson_post(\"windchill\", \"get_workspace\", key_data=\"workspace\")",
"def workspace_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_security_group_id\")",
"def get_ws():\n tenant = os.environ.get('TENANT')\n if tenant:\n auth = InteractiveLoginAuthentication(tenant_id = tenant)\n ws = Workspace.from_config(auth = auth)\n else:\n ws = Workspace.from_config()\n return ws",
"def workspace_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_security_group_id\")"
] |
[
"0.71933305",
"0.6364322",
"0.6171849",
"0.614986",
"0.6058244",
"0.6058244",
"0.57621074",
"0.57621074",
"0.5758293",
"0.5758293",
"0.5756883",
"0.5652816",
"0.56086487",
"0.53660405",
"0.53605044",
"0.52795064",
"0.52268714",
"0.5118914",
"0.5118914",
"0.5118914",
"0.50680864",
"0.504583",
"0.49383518",
"0.48852122",
"0.4869012",
"0.48516786",
"0.48306465",
"0.4818719",
"0.47833133",
"0.47463804"
] |
0.65066355
|
1
|
Obtain the value of enable_msi_auth_for_monitoring.
|
def get_enable_msi_auth_for_monitoring(self) -> Union[bool, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_MONITORING_ADDON_NAME = addon_consts.get("CONST_MONITORING_ADDON_NAME")
CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get("CONST_MONITORING_USING_AAD_MSI_AUTH")
# read the original value passed by the command
enable_msi_auth_for_monitoring = self.raw_param.get("enable_msi_auth_for_monitoring")
if (
self.mc and
self.mc.service_principal_profile and
self.mc.service_principal_profile.client_id is not None
):
return False
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_MONITORING_ADDON_NAME
).config.get(CONST_MONITORING_USING_AAD_MSI_AUTH) is not None
):
enable_msi_auth_for_monitoring = (
safe_lower(
self.mc.addon_profiles.get(CONST_MONITORING_ADDON_NAME).config.get(
CONST_MONITORING_USING_AAD_MSI_AUTH
)
) == "true"
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_msi_auth_for_monitoring
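
The getter above encodes a precedence order worth calling out: an explicit service principal disables MSI auth outright, and the value persisted on the mc object overrides the raw command parameter. A hedged distillation of that order as a pure function (the argument names are illustrative, not the real API):

from typing import Optional


def msi_auth_enabled(
    raw_flag: Optional[bool],
    has_service_principal: bool,
    addon_config_value: Optional[str],
) -> Optional[bool]:
    if has_service_principal:
        # A cluster using a service principal cannot use AAD MSI auth
        # for monitoring, so the answer is False regardless of the flag.
        return False
    if addon_config_value is not None:
        # The string stored in the addon profile config wins over the raw flag.
        return addon_config_value.lower() == "true"
    return raw_flag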
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _GetEnableOsLoginValue(self, metadata_dict):\n instance_data, project_data = self._GetInstanceAndProjectAttributes(\n metadata_dict)\n instance_value = instance_data.get('enable-oslogin')\n project_value = project_data.get('enable-oslogin')\n value = instance_value or project_value or ''\n\n return value.lower() == 'true'",
"def auth_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_status\")",
"def enable_authentication(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_authentication\")",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def auth_mode(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_mode\")",
"def enable_authentication(self) -> bool:\n return pulumi.get(self, \"enable_authentication\")",
"def get_auth(self):\n return self._auth",
"def auth_setting(self) -> pulumi.Input[Union['AcceleratorBasicAuthSettingArgs', 'AcceleratorPublicSettingArgs', 'AcceleratorSshSettingArgs']]:\n return pulumi.get(self, \"auth_setting\")",
"def auth(self):\n return auth.get_auth()",
"def auth(self) -> Optional[pulumi.Input['IstioConfigAuth']]:\n return pulumi.get(self, \"auth\")",
"def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_integrity_monitoring\")",
"def _GetEnableTwoFactorValue(self, metadata_dict):\n instance_data, project_data = self._GetInstanceAndProjectAttributes(\n metadata_dict)\n instance_value = instance_data.get('enable-oslogin-2fa')\n project_value = project_data.get('enable-oslogin-2fa')\n value = instance_value or project_value or ''\n\n return value.lower() == 'true'",
"def device_only_auth_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")",
"def auth(self):\n return self._auth",
"def organization_enable_status(self) -> str:\n return pulumi.get(self, \"organization_enable_status\")",
"def auth(self):\n # boundary = int(ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_boundary'))\n # ldap_auth = ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_above_equal')\n # manual_auth = ssis_synctree_settings.get('SSIS_AUTOSEND', 'auth_less_than')\n # return ldap_auth if int(self._grade) >= boundary else manual_auth\n return 'ldap_syncplus'",
"def getEnabled(self):\n if getattr(self, 'installedversion', None) != __version__ :\n return False\n return self.getField('enabled').get(self)",
"def auth(self):\n return auth.get_auth()",
"def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token",
"def auth_token_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auth_token_enabled\")",
"def readHAL_enable(self):\r\n return self.hal['enable']",
"def authentication_mode(self) -> pulumi.Output['outputs.UserAuthenticationMode']:\n return pulumi.get(self, \"authentication_mode\")",
"def authenticationToken(self):\n return self.authToken",
"def auth_method(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_method\")",
"def get_basic_auth(self):\n return base64.b64encode(\"%s:%s\" % (self.settings[\"MERCHANT_ID\"],\n self.settings[\"MERCHANT_KEY\"]))",
"def get_start_user_login(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetStartUserLogin', self.handle)",
"def auth(self):\n return self._auth_config",
"def auth_mode(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_mode\")",
"def get_enable_syslog(self) -> Union[bool, None]:\n # read the original value passed by the command\n enable_syslog = self.raw_param.get(\"enable_syslog\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_syslog"
] |
[
"0.6428526",
"0.6033685",
"0.5987081",
"0.58922654",
"0.58922654",
"0.58912647",
"0.5848097",
"0.5837402",
"0.57985854",
"0.56505895",
"0.56397754",
"0.5638569",
"0.5592268",
"0.5578762",
"0.5571033",
"0.55265933",
"0.551889",
"0.5505663",
"0.549854",
"0.54973227",
"0.5479216",
"0.54771477",
"0.5470293",
"0.5460145",
"0.5453519",
"0.5447996",
"0.54380965",
"0.54089457",
"0.54027164",
"0.53900343"
] |
0.8207107
|
0
|
Obtain the value of enable_syslog.
|
def get_enable_syslog(self) -> Union[bool, None]:
# read the original value passed by the command
enable_syslog = self.raw_param.get("enable_syslog")
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_syslog
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getNodeSyslog(self,node):\n data = self.connect('get','nodes/%s/syslog' % (node),None)\n return data",
"def _GetLoggingSetting(self):\n # Supported values in GCP are '', 'true', and 'True'.\n settings = [str(x) for x in self.term.logging]\n if any(value in settings for value in ['true', 'True']):\n return True\n return False",
"def get_syslog_server(client_session, user_id):\n\n cfg_result = client_session.read('systemSyslogServer')\n\n if cfg_result['status'] == 200:\n return True\n else:\n return False",
"def get_log_level(self) -> int:\n logging.debug(self.args)\n return getattr(logging, self.args.loglevel.upper(), None)",
"def getLogLevel(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_LEVEL_KEY)",
"def _get_lsp_config_notify_isis(self):\n return self.__lsp_config_notify_isis",
"def log_level(self) -> str:\n return pulumi.get(self, \"log_level\")",
"def _get_lsp_config_isis_level(self):\n return self.__lsp_config_isis_level",
"def _get_lsp_config_notify_ospf(self):\n return self.__lsp_config_notify_ospf",
"def _get_loglevel(self, **params):\n try:\n preamble = params['data']['preamble']\n return self.loglevel[preamble.split(\" \")[1]]\n except KeyError:\n return logging.DEBUG",
"def logunsentlbsys(self) :\n\t\ttry :\n\t\t\treturn self._logunsentlbsys\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_enable_timestamp_write():\n global enable_timestamp_write\n return enable_timestamp_write",
"def _get_log_status(self):\n log_status = rdBase.LogStatus()\n log_status = {st.split(\":\")[0]: st.split(\":\")[1] for st in log_status.split(\"\\n\")}\n log_status = {k: True if v == \"enabled\" else False for k, v in log_status.items()}\n return log_status",
"def getLogLevel() -> Optional[int]:\n\n if not logger.disabled:\n return logger.level\n return None",
"def fusion_api_get_remote_syslog_configuration(self, api=None, headers=None, param=None):\n return self.remote_syslog.get(api=api, headers=headers, param=param)",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def get_logging_level(self):\n return self.logging_level",
"def test_syslog_shortcut_simple(self):\n with cleanup_handlers():\n expected_message = random_string(50)\n coloredlogs.install(syslog=True)\n logging.info(\"%s\", expected_message)\n if os.path.isfile(UNIX_SYSTEM_LOG):\n with open(UNIX_SYSTEM_LOG) as handle:\n assert any(expected_message in line for line in handle)",
"def binary_log_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"binary_log_enabled\")",
"def logging(self) -> LogLevel:\n return self._data[ATTR_LOGGING]",
"def _get_lsp_config_shortcut_isis_level_configured(self):\n return self.__lsp_config_shortcut_isis_level_configured",
"def enable_access_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_access_logging\")",
"def syslog_bind_host(self):\n return _host_tuple(self._get('syslog_bind_host'))",
"def binary_log_enabled(self) -> bool:\n return pulumi.get(self, \"binary_log_enabled\")",
"def get_logging_level():\n try:\n level = rcp.get(\"logging\",\"level\").upper()\n return convert_logging_level(level)\n except:\n logging.warning(\"[logging] section of the config malformed.\")\n return False",
"def enable_container_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_container_logging\")",
"def get_logger():\n global swan_logger\n return swan_logger",
"def isLogEventsEnabled(self):\n if DPxIsDinLogEvents() == 0:\n enabled = False\n else:\n enabled = True\n return enabled",
"def log_level(self) -> str:\n return self._log_level"
] |
[
"0.60983366",
"0.5722266",
"0.5693049",
"0.5658202",
"0.56569153",
"0.54985195",
"0.5477139",
"0.5425688",
"0.5399692",
"0.53968275",
"0.538075",
"0.53761613",
"0.53712434",
"0.5334233",
"0.5316423",
"0.52780515",
"0.52780515",
"0.5260578",
"0.52439195",
"0.52166086",
"0.52071387",
"0.5204574",
"0.51877314",
"0.5183099",
"0.5177629",
"0.517604",
"0.51609564",
"0.5115649",
"0.50931245",
"0.5087993"
] |
0.8398978
|
0
|
Obtain the value of data_collection_settings.
|
def get_data_collection_settings(self) -> Union[str, None]:
# read the original value passed by the command
data_collection_settings_file_path = self.raw_param.get("data_collection_settings")
# validate user input
if data_collection_settings_file_path:
if not os.path.isfile(data_collection_settings_file_path):
raise InvalidArgumentValueError(
"{} is not valid file, or not accessable.".format(
data_collection_settings_file_path
)
)
return data_collection_settings_file_path
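
Unlike the pure getters in this file, this one validates user input eagerly. A standalone sketch of the same fail-fast check, with the CLI-specific InvalidArgumentValueError swapped for a plain ValueError:

import os
from typing import Optional


def validate_settings_path(path: Optional[str]) -> Optional[str]:
    # Only validate when a path was actually supplied; None passes through.
    if path and not os.path.isfile(path):
        raise ValueError(f"{path} is not a valid file, or is not accessible.")
    return path


try:
    validate_settings_path("/nonexistent/dataCollectionSettings.json")
except ValueError as exc:
    print(exc)  # the bad path is rejected before any request is built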
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def settings():\n return _get_settings()[1]",
"def value(self):\n\n memcached_items = memcache_services.get_multi([self.name])\n if self.name in memcached_items:\n return memcached_items[self.name]\n\n datastore_item = config_models.ConfigPropertyModel.get(\n self.name, strict=False)\n if datastore_item is not None:\n memcache_services.set_multi({\n datastore_item.id: datastore_item.value})\n return datastore_item.value\n\n return self.default_value",
"def getValue(self, valueName):\n\t\treturn self.settings[valueName][0]",
"def get_settings(self):\n return self.settings",
"def value(self) -> str:\n return self._config.get('value')",
"def get_setting_value(self, title, setting):\r\n return self.parser.get(title, setting)",
"def settings(self):\n return self._settings",
"def settings(self):\n return self._settings",
"def get_settings():\n return db.get_data()",
"def value(self, setting_id):\n if setting_id not in self.values:\n return False\n return self.values[setting_id]",
"def get_value(self, instance):\n try:\n return ParameterSetting.objects.get(\n base_parameter=instance,\n project=self.context.get(\"view\").kwargs.get(\"project\"),\n ).raw_value\n except ParameterSetting.DoesNotExist:\n return None",
"def get_setting(self, id):\n return __settings__.getSetting(id)",
"def data_option(self):\n if \"dataOption\" in self._prop_dict:\n return self._prop_dict[\"dataOption\"]\n else:\n return None",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def value(self):\n return self.get_data(\"value\")",
"def get_setting_value(self, key, default = None):\n \n if not \"settings\" in self.configuration or not key in self.configuration['settings']:\n return default\n \n return self.configuration['settings'][key]",
"def get_setting(self, setting):\n return self.do_rpc(\"get_setting\", key=key)",
"def getSettings(self):\n return self.cfg",
"def get_config(self, key):\n return self.data[key]",
"def get_value(self, data):\n value = data['value']\n return value",
"def to_native_value(self):\n return self.__class__.get_setting(self.key)",
"def get_settings():\n return SettingCollection.build()",
"def __getitem__(self, name):\n\n return self._settings[name]",
"def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]",
"def get(cls, user_name, s_key):\n setting = cls.get_object(user_name, s_key)\n if setting:\n return setting.s_value",
"def value(self):\r\n return self._data['value']",
"def settings(self) -> Optional[pulumi.Input['ConfigurationServiceSettingsArgs']]:\n return pulumi.get(self, \"settings\")"
] |
[
"0.6528297",
"0.6382505",
"0.635182",
"0.6245283",
"0.61727256",
"0.61666256",
"0.61304647",
"0.61304647",
"0.6120048",
"0.60848856",
"0.6051074",
"0.6050535",
"0.60140234",
"0.5946681",
"0.5946681",
"0.5946681",
"0.5946681",
"0.594485",
"0.5935633",
"0.59317094",
"0.5925324",
"0.5915223",
"0.58593905",
"0.57995635",
"0.57880396",
"0.57664585",
"0.5751433",
"0.57504433",
"0.57461035",
"0.5735442"
] |
0.74231696
|
0
|
Helper function to obtain the os_type of the virtual node addon.
|
def get_virtual_node_addon_os_type(self) -> str:
return "Linux"
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_os_type(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsType', self.handle)",
"def os_type(self) -> Optional[str]:\n return pulumi.get(self, \"os_type\")",
"def os_type(self) -> Optional[str]:\n return pulumi.get(self, \"os_type\")",
"def get_host_os_type(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsType', self.handle)",
"def os_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"os_type\")",
"def known_os_type():\n return 'Linux'",
"def vm_os_type(self, name_of_vm):\n # import pdb;pdb.set_trace()\n vm_obj = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n os_type = vm_obj.config.guestFullName\n if \"CentOS\" in os_type:\n ostype = \"Centos\"\n return ostype\n elif \"Windows\" in os_type:\n ostype = \"Windows\"\n return ostype\n elif \"Red Hat\" in os_type:\n ostype = \"Red Hat\"\n return ostype",
"def os_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"os_type\")",
"def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:\n return pulumi.get(self, \"os_type\")",
"def os_type(self):\n\n return self._os_type",
"def _detect_os():\n # TODO: Add pillar support for the apachectl location\n os_family = __grains__[\"os_family\"]\n if os_family == \"RedHat\":\n return \"apachectl\"\n elif os_family == \"Debian\" or os_family == \"Suse\":\n return \"apache2ctl\"\n else:\n return \"apachectl\"",
"def get_os_type(cls):\n return {\n 'Darwin': cls.MAC,\n 'Linux': cls.LINUX,\n 'Windows': cls.WINDOWS\n }.get(platform.system(), cls.LINUX)",
"def get_os():\n\n os_platform = sys.platform\n\n if os_platform.startswith('darwin'):\n return 'mac'\n\n if os_platform.startswith('linux'):\n return 'linux'\n\n if os_platform.startswith('win'):\n return 'windows'\n\n raise RuntimeError('Unsupported operating system.')",
"def osversion():\n return platform()",
"def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname",
"def get_os_name(cls):\n return cls.get_os_type().name",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)",
"def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)",
"def os_version(self) -> Optional[pulumi.Input['WindowsNodeConfigOsVersion']]:\n return pulumi.get(self, \"os_version\")",
"def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])",
"def node_type( fdt, node_offset, verbose=0 ):\n rt = \"\"\n try:\n node = fdt.get_node( node_offset )\n rt = node.props[\"compatible\"].to_string()\n except Exception as e:\n pass\n\n return rt",
"def get_supported_os(self):\r\n return self.os",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def node_type(self) -> str:\n return pulumi.get(self, \"node_type\")",
"def platform():\n if 'OS' in gyp_defines():\n if 'android' in gyp_defines()['OS']:\n return 'android'\n else:\n return gyp_defines()['OS']\n elif IsWindows():\n return 'win'\n elif IsLinux():\n return 'linux'\n else:\n return 'mac'",
"def test_os_node(self):\n self.assertEqual(self.settings.OS_NODE, platform.node())",
"def get_os_class(host):\n for h in host:\n os_class = h.osclass\n if os_class is not None:\n os_class = str(os_class)\n string_os_class = os_class.split('\"')[1].split('\"')[0]\n return string_os_class\n else:\n return \"No OS class available.\"",
"def node_type(self) -> Optional[str]:\n return pulumi.get(self, \"node_type\")",
"def operatingsystem(self):\n # type: () -> string_types\n return self._operatingsystem",
"def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)"
] |
[
"0.806094",
"0.7566977",
"0.7566977",
"0.7524138",
"0.7362434",
"0.73300123",
"0.73274463",
"0.7290057",
"0.7276726",
"0.7245464",
"0.6884631",
"0.6883109",
"0.6619979",
"0.6607807",
"0.65581787",
"0.6537669",
"0.65150976",
"0.6427563",
"0.6365031",
"0.6364053",
"0.6318775",
"0.62981915",
"0.6275582",
"0.62533504",
"0.6222385",
"0.62080896",
"0.6193361",
"0.61927366",
"0.61847484",
"0.6136198"
] |
0.8594602
|
0
|
Obtain the value of aci_subnet_name.
|
def get_aci_subnet_name(self) -> Union[str, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get("CONST_VIRTUAL_NODE_ADDON_NAME")
CONST_VIRTUAL_NODE_SUBNET_NAME = addon_consts.get("CONST_VIRTUAL_NODE_SUBNET_NAME")
# read the original value passed by the command
aci_subnet_name = self.raw_param.get("aci_subnet_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None
):
aci_subnet_name = self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return aci_subnet_name
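
The lookup key here is composed: the addon profile is indexed by the addon name concatenated with the OS type. A small self-contained illustration with plain dicts (the constant values shown are assumptions, not read from the source):

ADDON_NAME = "aciConnector"  # assumed value of CONST_VIRTUAL_NODE_ADDON_NAME
SUBNET_KEY = "SubnetName"    # assumed value of CONST_VIRTUAL_NODE_SUBNET_NAME

addon_profiles = {
    "aciConnectorLinux": {"config": {"SubnetName": "aci-subnet"}},
}

# get_virtual_node_addon_os_type() always returns "Linux", so the composed
# key is stable for this addon.
key = ADDON_NAME + "Linux"
profile = addon_profiles.get(key)
subnet_name = profile["config"].get(SUBNET_KEY) if profile else None
print(subnet_name)  # -> aci-subnet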
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def subnet_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def cluster_subnet(self) -> str:\n return pulumi.get(self, \"cluster_subnet\")",
"def subnet_group_name(self) -> str:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")",
"def service_subnet(self) -> str:\n return pulumi.get(self, \"service_subnet\")",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def sc_subnet(self):\n return self._sc_subnet",
"def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")",
"def app_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_subnet_id\")",
"def subnet_id(self):\n return self._subnet_id",
"def get_vnet_subnet_id(self) -> Union[str, None]:\n return self.agentpool_context.get_vnet_subnet_id()",
"def get_appgw_subnet_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_ID = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_ID\")\n\n # read the original value passed by the command\n appgw_subnet_id = self.raw_param.get(\"appgw_subnet_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None\n ):\n appgw_subnet_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_id",
"def get_appgw_subnet_cidr(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_CIDR\")\n\n # read the original value passed by the command\n appgw_subnet_cidr = self.raw_param.get(\"appgw_subnet_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None\n ):\n appgw_subnet_cidr = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_cidr",
"def subnet_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subnet_prefix\")",
"def subnet(self) -> Optional['outputs.ApiEntityReferenceResponse']:\n return pulumi.get(self, \"subnet\")",
"def subnetwork_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnetwork_name\")",
"def subnet_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_resource_id\")",
"def get_subnet_details(self, subnet_name=\"dummy_subnet\", subnet_id=None):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/subnets\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n if result is None:\n LOG_OBJ.error(\"No response from Server while getting subnets\")\n return result\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get subnet details Failed with status %s \" %\n result.status)\n return result.status\n\n output = json.loads(result.data)\n\n for subnets in output['subnets']:\n if (subnet_id is not None and (subnets['id'] == subnet_id)) or\\\n subnets['name'].lower() == subnet_name.lower():\n LOG_OBJ.debug(\"Subnet Details: %s\" % subnets)\n return subnets\n\n LOG_OBJ.error(\"Subnet with name:%s or with id:%s is Not Found\" %\n (subnet_name, subnet_id))",
"def service_runtime_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_runtime_subnet_id\")",
"def subnet_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"subnet_arns\")",
"def subnet_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"subnet_arns\")"
] |
[
"0.73947215",
"0.73365384",
"0.73025113",
"0.7265406",
"0.72164613",
"0.719906",
"0.713476",
"0.713476",
"0.7115182",
"0.7029042",
"0.7018052",
"0.7018052",
"0.69657993",
"0.6899262",
"0.6899262",
"0.68794215",
"0.6759969",
"0.6753503",
"0.67428374",
"0.6624055",
"0.6604675",
"0.65278244",
"0.6516636",
"0.64086246",
"0.6400635",
"0.6377607",
"0.6237597",
"0.60223734",
"0.5977851",
"0.5977851"
] |
0.8567893
|
0
|
Obtain the value of appgw_name.
|
def get_appgw_name(self) -> Union[str, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get("CONST_INGRESS_APPGW_ADDON_NAME")
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME = addon_consts.get(
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME"
)
# read the original value passed by the command
appgw_name = self.raw_param.get("appgw_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME) is not None
):
appgw_name = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_app_name(app):\n return app[APP_NAME_KEY]",
"def getApplicationName(self) -> unicode:\n ...",
"def get_appgw_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = addon_consts.get(\"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\")\n\n # read the original value passed by the command\n appgw_id = self.raw_param.get(\"appgw_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None\n ):\n appgw_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_id",
"def get_app_name(self):\n return getattr(self, '_app_name', None)",
"def app_name(self):\n return self._app_name",
"def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")",
"def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")",
"def app_name(self) -> str:\n return self._app_name",
"def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")",
"def get_name():\n return config.APP_NAME",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def get_name(self, name):\n return self.apps[name]['name']",
"def app_name(self): # pylint:disable=function-redefined\n return self._app_name",
"def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")",
"def name(self):\n return self.application_tree['name']",
"def _extract_appname(self, log):\n appname = \"\"\n if \"appLaunch\" in log:\n appname = log[\"appLaunch\"][\"appName\"]\n else:\n self.logger.info(\"no applaunch field\")\n self.logger.info(log[\"event\"])\n pass \n \n return appname",
"def get_appgw_watch_namespace(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_WATCH_NAMESPACE = addon_consts.get(\"CONST_INGRESS_APPGW_WATCH_NAMESPACE\")\n\n # read the original value passed by the command\n appgw_watch_namespace = self.raw_param.get(\"appgw_watch_namespace\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE) is not None\n ):\n appgw_watch_namespace = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_watch_namespace",
"def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def gateway_name(self):\n return self.msg.gateway_name",
"def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]",
"def gateway_name(self) -> str:\n return pulumi.get(self, \"gateway_name\")",
"def get_name(self):\n return self.settings.get(\"name\", None)",
"def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')",
"def name(self) -> str | None:\n return self.status.get(\"UPSNAME\")",
"def get_app_name(i):\n return app_id + '-' + str(i)"
] |
[
"0.74616367",
"0.7233052",
"0.7183323",
"0.7180863",
"0.7084527",
"0.70705265",
"0.70004845",
"0.693833",
"0.6933758",
"0.6931009",
"0.6879082",
"0.6879082",
"0.6818683",
"0.6816083",
"0.67015773",
"0.6605833",
"0.65939206",
"0.65772164",
"0.6567707",
"0.6544749",
"0.6544749",
"0.6544749",
"0.6544749",
"0.64026743",
"0.6382005",
"0.6377448",
"0.6366689",
"0.62928635",
"0.62744105",
"0.62732154"
] |
0.85585195
|
0
|
Obtain the value of appgw_subnet_cidr.
|
def get_appgw_subnet_cidr(self) -> Union[str, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get("CONST_INGRESS_APPGW_ADDON_NAME")
CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get("CONST_INGRESS_APPGW_SUBNET_CIDR")
# read the original value passed by the command
appgw_subnet_cidr = self.raw_param.get("appgw_subnet_cidr")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None
):
appgw_subnet_cidr = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_subnet_cidr
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_appgw_subnet_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_ID = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_ID\")\n\n # read the original value passed by the command\n appgw_subnet_id = self.raw_param.get(\"appgw_subnet_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None\n ):\n appgw_subnet_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_id",
"def cidr(self):\n return self._cidr",
"def app_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_subnet_id\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cidr\")",
"def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")",
"def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def cidr(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cidr\")",
"def cidr(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cidr\")",
"def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")",
"def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def get_vnet_subnet_id(self) -> Union[str, None]:\n return self.agentpool_context.get_vnet_subnet_id()",
"def subnet_id(self):\n return self._subnet_id",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def service_subnet(self) -> str:\n return pulumi.get(self, \"service_subnet\")",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def pod_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pod_cidr\")",
"def subnet(self) -> Optional['outputs.ApiEntityReferenceResponse']:\n return pulumi.get(self, \"subnet\")",
"def cidr_block(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cidr_block\")",
"def cluster_subnet(self) -> str:\n return pulumi.get(self, \"cluster_subnet\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def service_cidr(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_cidr\")",
"def get_aci_subnet_name(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_ADDON_NAME\")\n CONST_VIRTUAL_NODE_SUBNET_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_SUBNET_NAME\")\n\n # read the original value passed by the command\n aci_subnet_name = self.raw_param.get(\"aci_subnet_name\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None\n ):\n aci_subnet_name = self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return aci_subnet_name"
] |
[
"0.7323157",
"0.7128274",
"0.7106472",
"0.7103108",
"0.7103108",
"0.7060922",
"0.7060922",
"0.7002869",
"0.69952387",
"0.69638777",
"0.6960986",
"0.6960986",
"0.6935144",
"0.6803388",
"0.6785884",
"0.6731447",
"0.6727425",
"0.6727425",
"0.6703221",
"0.6703221",
"0.66906685",
"0.66869956",
"0.66869956",
"0.66517633",
"0.65439284",
"0.6519839",
"0.6463763",
"0.6463763",
"0.6463763",
"0.64603674"
] |
0.8577678
|
0
|
Obtain the value of appgw_id.
|
def get_appgw_id(self) -> Union[str, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get("CONST_INGRESS_APPGW_ADDON_NAME")
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = addon_consts.get("CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID")
# read the original value passed by the command
appgw_id = self.raw_param.get("appgw_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None
):
appgw_id = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_id
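
get_appgw_name, get_appgw_subnet_cidr, and get_appgw_id above are instances of one template that differs only in the raw-parameter name and the addon config key. A hedged refactoring sketch that factors the shared shape into one helper; raw_param and addon_profiles are plain dicts standing in for the real context and ManagedCluster objects, and the profile/key strings in the example are assumptions:

from typing import Any, Dict, Optional


def get_addon_param(
    raw_param: Dict[str, Any],
    addon_profiles: Optional[Dict[str, Any]],
    addon_name: str,
    config_key: str,
    param_name: str,
) -> Optional[str]:
    # Start from the value passed on the command line.
    value = raw_param.get(param_name)
    profile = (addon_profiles or {}).get(addon_name)
    if profile is not None:
        mc_value = profile.get("config", {}).get(config_key)
        if mc_value is not None:
            value = mc_value  # the mc object takes precedence over the raw value
    return value


profiles = {"ingressApplicationGateway": {"config": {"applicationGatewayName": "gw1"}}}
print(get_addon_param({"appgw_name": "cli-gw"}, profiles,
                      "ingressApplicationGateway", "applicationGatewayName",
                      "appgw_name"))  # -> gw1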
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def app_id(self):\n return self._app_id",
"def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")",
"def appid(self):\n return self._item[\"appid\"]",
"def app_id(self) -> str:\n return self._app_id",
"def get_appgw_name(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME = addon_consts.get(\n \"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME\"\n )\n\n # read the original value passed by the command\n appgw_name = self.raw_param.get(\"appgw_name\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME) is not None\n ):\n appgw_name = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_name",
"def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")",
"def get_appgw_subnet_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_ID = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_ID\")\n\n # read the original value passed by the command\n appgw_subnet_id = self.raw_param.get(\"appgw_subnet_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None\n ):\n appgw_subnet_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_id",
"def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")",
"def application_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_id\")",
"def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")",
"def ApplicationId(self) -> _n_0_t_0:",
"def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")",
"def app_id(self):\n return self._app_id or self._modules['default'].data.get('application')",
"def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")",
"def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")",
"def get_app_id(form):\n return getattr(form, \"app_id\", None)",
"def _app_id(self):\n return '{}-{}'.format(self.config['app']['name'],\n self.config['app']['version'])",
"def single_tenant_app_id(self):\n if \"singleTenantAppId\" in self._prop_dict:\n return self._prop_dict[\"singleTenantAppId\"]\n else:\n return None",
"def application_object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_object_id\")",
"def server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_id\")",
"def vpn_gateway_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpn_gateway_id\")",
"def get_client_id() -> str:\n from .util import get_env_value, is_env_key\n client_id = cfg.client_id\n if is_env_key(client_id):\n value = get_env_value(client_id)\n if value is None:\n print(f'could not get CLIENT_ID from environment with key: {client_id[4:]}')\n input('\\npress enter to exit...')\n exit(1)\n return value\n return client_id",
"def app_version_id(self):\n return self._app_version_id",
"def app_id(self):\n return self._chromecast.app_id if self._chromecast else None",
"def get_google_id (self):\n return self._google_id",
"def application_object_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_object_id\")",
"def apple_id(self):\n if \"appleId\" in self._prop_dict:\n return self._prop_dict[\"appleId\"]\n else:\n return None"
] |
[
"0.721044",
"0.7155202",
"0.71197546",
"0.7063182",
"0.6951479",
"0.69196516",
"0.6867817",
"0.6846431",
"0.6845491",
"0.6831396",
"0.6828174",
"0.6737395",
"0.66991",
"0.6506326",
"0.6498896",
"0.6498896",
"0.6498896",
"0.64338803",
"0.64220214",
"0.639425",
"0.6338759",
"0.6160762",
"0.6141728",
"0.6110583",
"0.6106903",
"0.6087056",
"0.6086534",
"0.60775584",
"0.6049086",
"0.60484594"
] |
0.83992416
|
0
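Every get_appgw_*-style getter in these rows repeats one lookup scheme: read the raw command parameter, then fall back to the matching key in the addon profile config on the mc object when it is present. A hedged refactoring sketch of that shared pattern; the standalone helper is hypothetical and not part of the source:

from typing import Any, Optional

def addon_config_value(mc: Any, addon_name: str, config_key: str) -> Optional[str]:
    # Return the addon config value stored on `mc`, or None if unavailable.
    if not (mc and mc.addon_profiles and addon_name in mc.addon_profiles):
        return None
    profile = mc.addon_profiles.get(addon_name)
    if profile is None or not profile.config:
        return None
    return profile.config.get(config_key)

# Usage sketch inside a getter such as get_appgw_id:
#     appgw_id = self.raw_param.get("appgw_id")
#     fallback = addon_config_value(self.mc, CONST_INGRESS_APPGW_ADDON_NAME,
#                                   CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)
#     if fallback is not None:
#         appgw_id = fallback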
|
Obtain the value of appgw_subnet_id.
|
def get_appgw_subnet_id(self) -> Union[str, None]:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get("CONST_INGRESS_APPGW_ADDON_NAME")
    CONST_INGRESS_APPGW_SUBNET_ID = addon_consts.get("CONST_INGRESS_APPGW_SUBNET_ID")

    # read the original value passed by the command
    appgw_subnet_id = self.raw_param.get("appgw_subnet_id")
    # try to read the property value corresponding to the parameter from the `mc` object
    if (
        self.mc and
        self.mc.addon_profiles and
        CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
        self.mc.addon_profiles.get(
            CONST_INGRESS_APPGW_ADDON_NAME
        ).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None
    ):
        appgw_subnet_id = self.mc.addon_profiles.get(
            CONST_INGRESS_APPGW_ADDON_NAME
        ).config.get(CONST_INGRESS_APPGW_SUBNET_ID)

    # this parameter does not need dynamic completion
    # this parameter does not need validation
    return appgw_subnet_id
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def app_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_subnet_id\")",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self):\n return self._subnet_id",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def get_appgw_subnet_cidr(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get(\"CONST_INGRESS_APPGW_SUBNET_CIDR\")\n\n # read the original value passed by the command\n appgw_subnet_cidr = self.raw_param.get(\"appgw_subnet_cidr\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None\n ):\n appgw_subnet_cidr = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_subnet_cidr",
"def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"virtual_network_subnet_id\")",
"def get_vnet_subnet_id(self) -> Union[str, None]:\n return self.agentpool_context.get_vnet_subnet_id()",
"def subnet_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_resource_id\")",
"def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:\n return pulumi.get(self, \"subnet\")",
"def service_runtime_subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_runtime_subnet_id\")",
"def service_subnet(self) -> str:\n return pulumi.get(self, \"service_subnet\")",
"def subnet(self) -> pulumi.Output[Optional['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"subnet\")",
"def sc_subnet(self):\n return self._sc_subnet",
"def get_aci_subnet_name(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_VIRTUAL_NODE_ADDON_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_ADDON_NAME\")\n CONST_VIRTUAL_NODE_SUBNET_NAME = addon_consts.get(\"CONST_VIRTUAL_NODE_SUBNET_NAME\")\n\n # read the original value passed by the command\n aci_subnet_name = self.raw_param.get(\"aci_subnet_name\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None\n ):\n aci_subnet_name = self.mc.addon_profiles.get(\n CONST_VIRTUAL_NODE_ADDON_NAME +\n self.get_virtual_node_addon_os_type()\n ).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return aci_subnet_name",
"def subnet(self) -> Optional['outputs.ApiEntityReferenceResponse']:\n return pulumi.get(self, \"subnet\")",
"def cluster_subnet(self) -> str:\n return pulumi.get(self, \"cluster_subnet\")",
"def get_appgw_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = addon_consts.get(\"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\")\n\n # read the original value passed by the command\n appgw_id = self.raw_param.get(\"appgw_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None\n ):\n appgw_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_id",
"def subnet_group_name(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> str:\n return pulumi.get(self, \"subnet_group_name\")",
"def get_subnet_by_id(self, id):\n return self.network.get_subnet(id)",
"def subnet_group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_group_name\")",
"def subnet_id_lookup(session, subnet_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_subnets(Filters=[{\"Name\": \"tag:Name\", \"Values\": [subnet_domain]}])\n if len(response['Subnets']) == 0:\n return None\n else:\n return response['Subnets'][0]['SubnetId']",
"def get_subnet(self, subnet_id):\n LOG.debug(\"Get subnet %s\", subnet_id)\n\n if subnet_id not in self.subnets_by_id:\n return None\n\n data = self.subnets_by_id[subnet_id]\n LOG.debug(\"Subnet data: %s\", data)\n\n # Convert to form expected by NetModel.\n ip_version = 6 if ':' in data['cidr'] else 4\n subnet = {'enable_dhcp': True,\n 'ip_version': ip_version,\n 'cidr': data['cidr'],\n 'dns_nameservers': data.get('dns_servers') or [],\n 'id': subnet_id,\n 'gateway_ip': data['gateway_ip'],\n 'host_routes': data.get('host_routes', []),\n 'network_id': data.get('network_id', NETWORK_ID)}\n if ip_version == 6:\n subnet['ipv6_address_mode'] = DHCPV6_STATEFUL\n subnet['ipv6_ra_mode'] = DHCPV6_STATEFUL\n\n return dhcp.DictModel(subnet)",
"def vnet_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vnet_subnet_ids\")"
] |
[
"0.8513004",
"0.8058407",
"0.80336833",
"0.7891081",
"0.7837524",
"0.7837524",
"0.7442669",
"0.7367331",
"0.7367235",
"0.7367235",
"0.7299241",
"0.7105402",
"0.7026113",
"0.6965385",
"0.6806486",
"0.6794416",
"0.66188806",
"0.65474635",
"0.6447645",
"0.63097215",
"0.62285256",
"0.61513954",
"0.6135969",
"0.6135344",
"0.61190546",
"0.6019118",
"0.6019118",
"0.59989184",
"0.5860628",
"0.58223146"
] |
0.86212575
|
0
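The objective metadata attached to each row names a single triplet of columns, ["query", "document", "negatives"], which presumably designates how contrastive training triples are formed from a row. A minimal sketch under that reading; the row layout as a plain dict is an assumption:

def expand_row_to_triples(row: dict):
    # Yield (anchor, positive, negative) triples from one dataset row.
    for negative in row["negatives"]:
        yield (row["query"], row["document"], negative)

# Each row above carries 30 negatives, so one row expands to 30 triples.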
|
Obtain the value of appgw_watch_namespace.
|
def get_appgw_watch_namespace(self) -> Union[str, None]:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get("CONST_INGRESS_APPGW_ADDON_NAME")
    CONST_INGRESS_APPGW_WATCH_NAMESPACE = addon_consts.get("CONST_INGRESS_APPGW_WATCH_NAMESPACE")

    # read the original value passed by the command
    appgw_watch_namespace = self.raw_param.get("appgw_watch_namespace")
    # try to read the property value corresponding to the parameter from the `mc` object
    if (
        self.mc and
        self.mc.addon_profiles and
        CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
        self.mc.addon_profiles.get(
            CONST_INGRESS_APPGW_ADDON_NAME
        ).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE) is not None
    ):
        appgw_watch_namespace = self.mc.addon_profiles.get(
            CONST_INGRESS_APPGW_ADDON_NAME
        ).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE)

    # this parameter does not need dynamic completion
    # this parameter does not need validation
    return appgw_watch_namespace
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def get_namespace(self) -> str:\n return self._namespace",
"def metric_namespace(self) -> Optional[str]:\n return pulumi.get(self, \"metric_namespace\")",
"def metric_namespace(self) -> Optional[str]:\n return pulumi.get(self, \"metric_namespace\")",
"def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")",
"def metric_namespace(self) -> str:\n return self._values.get('metric_namespace')",
"def metric_namespace(self) -> str:\n return self._values.get('metric_namespace')",
"def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> str:\n return pulumi.get(self, \"namespace\")",
"def namespace(self):\n return self._execution_namespace.maps[0]",
"def namespace(self):\n return self._namespace",
"def _namespace(self) -> str:\n with open(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\", \"r\") as f:\n return f.read().strip()",
"def app_id_namespace(self):\n return self.__key.app_id_namespace()",
"def namespace(self) -> str:\n return self._namespace",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")",
"def namespace(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"namespace\")"
] |
[
"0.66621256",
"0.6606345",
"0.6606345",
"0.65469575",
"0.65469575",
"0.65469575",
"0.65354997",
"0.65354997",
"0.64289856",
"0.64289856",
"0.64289856",
"0.64289856",
"0.64289856",
"0.63780504",
"0.63611513",
"0.6319834",
"0.62419224",
"0.623522",
"0.62212163",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546",
"0.61980546"
] |
0.85458755
|
0
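One variant of the lookup pattern shows up in get_aci_subnet_name, visible among the negatives earlier in this section: the virtual node addon profile is keyed by the addon name concatenated with the OS type, so the profile key is computed before the dictionary lookup. A hedged sketch of that keying; the helper is hypothetical:

def virtual_node_profile_key(addon_name: str, os_type: str) -> str:
    # Virtual node addon profiles are stored per OS type, e.g. name + "Linux".
    return addon_name + os_type

# This reproduces the key built inline in get_aci_subnet_name via
# CONST_VIRTUAL_NODE_ADDON_NAME + self.get_virtual_node_addon_os_type().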
|
Obtain the value of enable_sgxquotehelper.
|
def get_enable_sgxquotehelper(self) -> bool:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_CONFCOM_ADDON_NAME = addon_consts.get("CONST_CONFCOM_ADDON_NAME")
    CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get("CONST_ACC_SGX_QUOTE_HELPER_ENABLED")

    # read the original value passed by the command
    enable_sgxquotehelper = self.raw_param.get("enable_sgxquotehelper")
    # try to read the property value corresponding to the parameter from the `mc` object
    if (
        self.mc and
        self.mc.addon_profiles and
        CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and
        self.mc.addon_profiles.get(
            CONST_CONFCOM_ADDON_NAME
        ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None
    ):
        enable_sgxquotehelper = self.mc.addon_profiles.get(
            CONST_CONFCOM_ADDON_NAME
        ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == "true"

    # this parameter does not need dynamic completion
    # this parameter does not need validation
    return enable_sgxquotehelper
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def include_quote(self) -> Optional[bool]:\n return pulumi.get(self, \"include_quote\")",
"def get_quotes(self):\n # However ignore the 'true' autodetection setting.\n jscs_quotes = self.jscs_options.get('validateQuoteMarks')\n if isinstance(jscs_quotes, dict):\n jscs_quotes = jscs_quotes.get('mark')\n if jscs_quotes and jscs_quotes is not True:\n return jscs_quotes\n\n # Use whatever quote type is set in preferences\n return get_quotes()",
"def _get_enable(self):\n return self.__enable",
"def _get_enable(self):\n return self.__enable",
"def stock_expiry_enabled():\n from common.models import InvenTreeSetting\n\n return InvenTreeSetting.get_setting('STOCK_ENABLE_EXPIRY', False, create=False)",
"def get_magic_quotes_gpc():\n raise NotImplementedError()",
"def enabled(self):\n return self._get('enabled')",
"def quotagpu(self):\n return self._quotagpu",
"def get(cls, provider_name):\r\n cls._check_configured()\r\n return cls._ENABLED.get(provider_name)",
"def EnableLicenseCheck(self):\n return self._get_attribute('enableLicenseCheck')",
"def use_config_enable_qty_inc(self):\n return self._use_config_enable_qty_inc",
"def get_flag(self):\n price_data = self.get_price_data()\n if price_data.get('flag'):\n return price_data.get('flag')\n return None",
"def allow_quote_request(self):\n return self._allow_quote_request",
"def quote_id(self) -> Optional[str]:\n return pulumi.get(self, \"quote_id\")",
"def get_id(self):\n return \"always_true_plugin\"",
"def get_symbol(self):\n return self.symbol",
"def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)",
"def shortenable(s):\n return s, True",
"def get_enable_image_cleaner(self) -> bool:\n # read the original value passed by the command\n enable_image_cleaner = self.raw_param.get(\"enable_image_cleaner\")\n\n return enable_image_cleaner",
"def get_isenabled(self):\n return self.isenabled",
"def dsp_enable(self):\n return self._dsp_enable",
"def quicken_import_active(request):\r\n return {'QUICKEN_IMPORT_ACTIVE': settings.QUICKEN_IMPORT_ACTIVE}",
"def getquoted(self): # real signature unknown; restored from __doc__\n pass",
"def get_tax_group_enabled(self):\n return self.tax_group_enabled",
"def _get_format(value, quote_mode='always'):\n\n formats = {'always': '{key}=\"{value}\"\\n', 'auto': '{key}={value}\\n'}\n\n if quote_mode not in formats.keys():\n return KeyError(f'quote_mode {quote_mode} is invalid')\n\n _mode = quote_mode\n if quote_mode == 'auto' and ' ' in value:\n _mode = 'always'\n return formats.get(_mode)",
"def get_quantization_capability(self):\n return self.cur_config['capabilities']",
"def radiant_xp_adv(self):\n return self._get(\"radiant_xp_adv\")",
"def get_id(self):\n return \"always_false_plugin\"",
"def get(self):\n return 'SSL {0}'.format(\n 'enabled' if SSLConfig._is_enabled() else 'disabled')",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True"
] |
[
"0.58308876",
"0.57336015",
"0.57133615",
"0.57133615",
"0.52744067",
"0.51573586",
"0.5150642",
"0.5144941",
"0.5105626",
"0.50925654",
"0.5083573",
"0.5078732",
"0.5014738",
"0.49901444",
"0.4989533",
"0.4974014",
"0.49591228",
"0.49126196",
"0.49086168",
"0.4902224",
"0.49004194",
"0.48963016",
"0.4893412",
"0.48874477",
"0.4883213",
"0.48827878",
"0.48394102",
"0.48332644",
"0.4824305",
"0.48221475"
] |
0.87389165
|
0
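Note that get_enable_sgxquotehelper compares the addon config value against the string "true": addon profile config values are stored as strings rather than booleans. A tiny sketch of that convention; the normalizer itself is hypothetical:

def config_flag_to_bool(value) -> bool:
    # Interpret an addon-config flag stored as the string "true"/"false";
    # a missing (None) flag reads as disabled.
    return str(value).lower() == "true"

assert config_flag_to_bool("true") is True
assert config_flag_to_bool("false") is False
assert config_flag_to_bool(None) is False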
|
Internal function to obtain the value of enable_secret_rotation. This function supports the option of enable_validation. When enabled, in update mode, if enable_secret_rotation is specified but azure keyvault secret provider addon is not enabled, an InvalidArgumentValueError will be raised.
|
def _get_enable_secret_rotation(self, enable_validation: bool = False) -> bool:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(
        "CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME"
    )
    CONST_SECRET_ROTATION_ENABLED = addon_consts.get(
        "CONST_SECRET_ROTATION_ENABLED"
    )

    # read the original value passed by the command
    enable_secret_rotation = self.raw_param.get("enable_secret_rotation")
    # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
    if self.decorator_mode == DecoratorMode.CREATE:
        if (
            self.mc and
            self.mc.addon_profiles and
            CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and
            self.mc.addon_profiles.get(
                CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
            ).config.get(CONST_SECRET_ROTATION_ENABLED) is not None
        ):
            enable_secret_rotation = self.mc.addon_profiles.get(
                CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
            ).config.get(CONST_SECRET_ROTATION_ENABLED) == "true"

    # this parameter does not need dynamic completion
    # validation
    if enable_validation:
        if self.decorator_mode == DecoratorMode.UPDATE:
            if enable_secret_rotation:
                azure_keyvault_secrets_provider_enabled = (
                    self.mc and
                    self.mc.addon_profiles and
                    CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and
                    self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled
                )
                if not azure_keyvault_secrets_provider_enabled:
                    raise InvalidArgumentValueError(
                        "--enable-secret-rotation can only be specified "
                        "when azure-keyvault-secrets-provider is enabled. "
                        "Please use command 'az aks enable-addons' to enable it."
                    )
    return enable_secret_rotation
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n # read the original value passed by the command\n disable_secret_rotation = self.raw_param.get(\"disable_secret_rotation\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--disable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return disable_secret_rotation",
"def get_enable_secret_rotation(self) -> bool:\n return self._get_enable_secret_rotation(enable_validation=True)",
"def get_disable_secret_rotation(self) -> bool:\n return self._get_disable_secret_rotation(enable_validation=True)",
"def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n # read the original value passed by the command\n rotation_poll_interval = self.raw_param.get(\"rotation_poll_interval\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL) is not None\n ):\n rotation_poll_interval = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL)\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if rotation_poll_interval:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--rotation-poll-interval can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return rotation_poll_interval",
"def rotate_webhook_secret(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"rotate_webhook_secret\")",
"def update_azure_keyvault_secrets_provider_addon_profile(\n self,\n azure_keyvault_secrets_provider_addon_profile: ManagedClusterAddonProfile,\n ) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n\n if self.context.get_disable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"false\"\n\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def _get_azure_keyvault_kms_key_vault_resource_id(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n azure_keyvault_kms_key_vault_resource_id = self.raw_param.get(\n \"azure_keyvault_kms_key_vault_resource_id\"\n )\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms and\n self.mc.security_profile.azure_key_vault_kms.key_vault_resource_id is not None\n ):\n azure_keyvault_kms_key_vault_resource_id = (\n self.mc.security_profile.azure_key_vault_kms.key_vault_resource_id\n )\n\n # validation\n if enable_validation:\n enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms(\n enable_validation=False)\n if (\n azure_keyvault_kms_key_vault_resource_id and\n (\n enable_azure_keyvault_kms is None or\n enable_azure_keyvault_kms is False\n )\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-resource-id\" requires \"--enable-azure-keyvault-kms\".'\n )\n\n key_vault_network_access = self._get_azure_keyvault_kms_key_vault_network_access(\n enable_validation=False)\n if (\n key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE and\n (\n azure_keyvault_kms_key_vault_resource_id is None or\n azure_keyvault_kms_key_vault_resource_id == \"\"\n )\n ):\n raise ArgumentUsageError(\n '\"--azure-keyvault-kms-key-vault-resource-id\" can not be empty if '\n '\"--azure-keyvault-kms-key-vault-network-access\" is \"Private\".'\n )\n if (\n key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PUBLIC and\n (\n azure_keyvault_kms_key_vault_resource_id is not None and\n azure_keyvault_kms_key_vault_resource_id != \"\"\n )\n ):\n raise ArgumentUsageError(\n '\"--azure-keyvault-kms-key-vault-resource-id\" must be empty if '\n '\"--azure-keyvault-kms-key-vault-network-access\" is \"Public\".'\n )\n\n return azure_keyvault_kms_key_vault_resource_id",
"def build_azure_keyvault_secrets_provider_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = (\n self.models.ManagedClusterAddonProfile(\n enabled=True,\n config={\n CONST_SECRET_ROTATION_ENABLED: \"false\",\n CONST_ROTATION_POLL_INTERVAL: \"2m\",\n },\n )\n )\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def secret(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"secret\")",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def _get_azure_keyvault_kms_key_id(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n azure_keyvault_kms_key_id = self.raw_param.get(\"azure_keyvault_kms_key_id\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms and\n self.mc.security_profile.azure_key_vault_kms.key_id is not None\n ):\n azure_keyvault_kms_key_id = self.mc.security_profile.azure_key_vault_kms.key_id\n\n if enable_validation:\n enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms(\n enable_validation=False)\n if (\n azure_keyvault_kms_key_id and\n (\n enable_azure_keyvault_kms is None or\n enable_azure_keyvault_kms is False\n )\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-id\" requires \"--enable-azure-keyvault-kms\".')\n\n return azure_keyvault_kms_key_id",
"def get_enable_azure_keyvault_kms(self) -> bool:\n return self._get_enable_azure_keyvault_kms(enable_validation=True)",
"def _get_disable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_azure_rbac = self.raw_param.get(\"disable_azure_rbac\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_azure_rbac:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--disable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return disable_azure_rbac",
"def encryption_settings_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_settings_version\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def aes_encryption(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aes_encryption\")",
"def auto_key_rotation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_key_rotation\")",
"def auto_key_rotation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_key_rotation\")",
"def get_rotation_encryption_angle(self):\n return self.__rotation_encryption_angle",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerStoreSpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIIsilonSpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def auto_key_rotation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_key_rotation\")",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def secret_key_ref(self) -> Optional['outputs.CSIIsilonSpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def _get_azure_keyvault_kms_key_vault_network_access(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n azure_keyvault_kms_key_vault_network_access = self.raw_param.get(\n \"azure_keyvault_kms_key_vault_network_access\"\n )\n\n # validation\n if enable_validation:\n enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms(\n enable_validation=False)\n if azure_keyvault_kms_key_vault_network_access is None:\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-network-access\" is required.')\n\n if (\n azure_keyvault_kms_key_vault_network_access and\n (\n enable_azure_keyvault_kms is None or\n enable_azure_keyvault_kms is False\n )\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-network-access\" requires \"--enable-azure-keyvault-kms\".')\n\n if azure_keyvault_kms_key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE:\n key_vault_resource_id = self._get_azure_keyvault_kms_key_vault_resource_id(\n enable_validation=False)\n if (\n key_vault_resource_id is None or\n key_vault_resource_id == \"\"\n ):\n raise RequiredArgumentMissingError(\n '\"--azure-keyvault-kms-key-vault-resource-id\" is required '\n 'when \"--azure-keyvault-kms-key-vault-network-access\" is Private.'\n )\n\n return azure_keyvault_kms_key_vault_network_access"
] |
[
"0.8169251",
"0.77878636",
"0.7083806",
"0.6873693",
"0.6088661",
"0.5972192",
"0.5735775",
"0.56320447",
"0.55335236",
"0.54329765",
"0.5220434",
"0.5207739",
"0.51439744",
"0.51405865",
"0.5066065",
"0.50539535",
"0.5038879",
"0.5003077",
"0.49914554",
"0.49904054",
"0.49884003",
"0.49884003",
"0.4982908",
"0.4978172",
"0.49775177",
"0.4969636",
"0.4966608",
"0.49660045",
"0.49659872",
"0.4963981"
] |
0.84970236
|
0
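Both secret-rotation getters share the same update-mode guard: the flag is honored only when the azure-keyvault-secrets-provider addon is already enabled on the cluster, and otherwise an InvalidArgumentValueError is raised. The enabled-check condenses to a sketch like this; the source inlines it rather than using a helper:

def keyvault_secrets_provider_enabled(mc, addon_name) -> bool:
    # True when the azure-keyvault-secrets-provider addon profile exists
    # on the managed cluster and is enabled.
    return bool(
        mc and
        mc.addon_profiles and
        addon_name in mc.addon_profiles and
        mc.addon_profiles.get(addon_name).enabled
    )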
|
Internal function to obtain the value of disable_secret_rotation. This function supports the option of enable_validation. When enabled, in update mode, if disable_secret_rotation is specified but azure keyvault secret provider addon is not enabled, an InvalidArgumentValueError will be raised.
|
def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:
    # determine the value of constants
    addon_consts = self.get_addon_consts()
    CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(
        "CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME"
    )

    # read the original value passed by the command
    disable_secret_rotation = self.raw_param.get("disable_secret_rotation")
    # We do not support this option in create mode, therefore we do not read the value from `mc`.

    # this parameter does not need dynamic completion
    # validation
    if enable_validation:
        if self.decorator_mode == DecoratorMode.UPDATE:
            if disable_secret_rotation:
                azure_keyvault_secrets_provider_enabled = (
                    self.mc and
                    self.mc.addon_profiles and
                    CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and
                    self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled
                )
                if not azure_keyvault_secrets_provider_enabled:
                    raise InvalidArgumentValueError(
                        "--disable-secret-rotation can only be specified "
                        "when azure-keyvault-secrets-provider is enabled. "
                        "Please use command 'az aks enable-addons' to enable it."
                    )
    return disable_secret_rotation
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def get_disable_secret_rotation(self) -> bool:\n return self._get_disable_secret_rotation(enable_validation=True)",
"def _get_enable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n\n # read the original value passed by the command\n enable_secret_rotation = self.raw_param.get(\"enable_secret_rotation\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) is not None\n ):\n enable_secret_rotation = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--enable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return enable_secret_rotation",
"def get_enable_secret_rotation(self) -> bool:\n return self._get_enable_secret_rotation(enable_validation=True)",
"def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n # read the original value passed by the command\n rotation_poll_interval = self.raw_param.get(\"rotation_poll_interval\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL) is not None\n ):\n rotation_poll_interval = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL)\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if rotation_poll_interval:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--rotation-poll-interval can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return rotation_poll_interval",
"def rotate_webhook_secret(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"rotate_webhook_secret\")",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def update_azure_keyvault_secrets_provider_addon_profile(\n self,\n azure_keyvault_secrets_provider_addon_profile: ManagedClusterAddonProfile,\n ) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n\n if self.context.get_disable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"false\"\n\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def _get_disable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n disable_rbac = self.raw_param.get(\"disable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n disable_rbac = not self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n if disable_rbac and self._get_enable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return disable_rbac",
"def _get_disable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_vpa = self.raw_param.get(\"disable_vpa\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_vpa and self._get_enable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return disable_vpa",
"def get_disable_azure_keyvault_kms(self) -> bool:\n return self._get_disable_azure_keyvault_kms(enable_validation=True)",
"def _get_disable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_azure_rbac = self.raw_param.get(\"disable_azure_rbac\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_azure_rbac:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--disable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return disable_azure_rbac",
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def _get_secret_value(self):\n try:\n kwargs = {'SecretId': self.name}\n if self.stage is not None:\n kwargs['VersionStage'] = self.stage\n response = self.secretsmanager_client.get_secret_value(**kwargs)\n except ClientError as e:\n logger.error(f\"Unable to retrieve value for secret {self.name}. {type(e)}: {e}\")\n raise\n else:\n return response",
"def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)",
"def _decrypt_secret(\n self, \n encryption_key: str,\n secret_list: List,\n secret_name: str\n ):\n f = Fernet(\n bytes(encryption_key, \"utf-8\")\n )\n secret=None\n if 'secrets' in secret_list:\n if secret_name in secret_list['secrets']:\n secret = f.decrypt(\n bytes(secret_list['secrets'][secret_name], \"utf-8\")\n ).decode('UTF-8')\n #self.log.log_success(\n # f'{secret_name} : {secret}'\n #)\n return secret",
"def _get_secret_string(self):\n result = self._get_secret_value().get('SecretString')\n if result is None:\n raise ValueError(f\"There is no SecretString for named {self.name!r}.\")\n return result",
"def secret(self) -> \"Secret\":\n warnings.warn(\n (\n 'Method \"secret\" is deprecated: insecure, leaves secret in cache.'\n ' Superseded by \"set_secret\"'\n ),\n DeprecationWarning,\n stacklevel=4,\n )\n _args: list[Arg] = []\n _ctx = self._select(\"secret\", _args)\n return Secret(_ctx)",
"def secret(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"secret\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secret\")",
"def secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secret\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret(self) -> \"Secret\":\n warnings.warn(\n 'Method \"secret\" is deprecated: been superseded by \"set_secret\"',\n DeprecationWarning,\n stacklevel=4,\n )\n _args: list[Arg] = []\n _ctx = self._select(\"secret\", _args)\n return Secret(_ctx)",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def build_azure_keyvault_secrets_provider_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n azure_keyvault_secrets_provider_addon_profile = (\n self.models.ManagedClusterAddonProfile(\n enabled=True,\n config={\n CONST_SECRET_ROTATION_ENABLED: \"false\",\n CONST_ROTATION_POLL_INTERVAL: \"2m\",\n },\n )\n )\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverSideCarsEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIIsilonSpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")"
] |
[
"0.78879106",
"0.7861287",
"0.716901",
"0.64894164",
"0.61119914",
"0.5905805",
"0.5690037",
"0.5635085",
"0.55625933",
"0.549286",
"0.5482957",
"0.54675084",
"0.53691316",
"0.5323151",
"0.5286192",
"0.528051",
"0.5251136",
"0.5234248",
"0.5226052",
"0.5221768",
"0.5184755",
"0.5184755",
"0.5177183",
"0.5171058",
"0.5168337",
"0.51634616",
"0.51559186",
"0.5151277",
"0.51475257",
"0.5145471"
] |
0.8517381
|
0
|
Obtain the value of disable_secret_rotation. This function will verify the parameter by default. In update mode, if disable_secret_rotation is specified but the azure keyvault secrets provider addon is not enabled, an InvalidArgumentValueError will be raised.
|
def get_disable_secret_rotation(self) -> bool:
return self._get_disable_secret_rotation(enable_validation=True)
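
A minimal, self-contained sketch of the wrapper pattern above: the public getter always delegates to the internal helper with enable_validation=True, while internal callers can opt out to avoid redundant checks. TinyContext and its fields are illustrative stand-ins, not the azure-cli classes.

class TinyContext:
    def __init__(self, raw_param):
        self.raw_param = raw_param

    def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:
        # internal helper: validation is optional so other getters can reuse it
        value = bool(self.raw_param.get("disable_secret_rotation", False))
        if enable_validation and value and not self.raw_param.get("addon_enabled", False):
            raise ValueError("--disable-secret-rotation requires the addon to be enabled")
        return value

    def get_disable_secret_rotation(self) -> bool:
        # public getter: validation is always on
        return self._get_disable_secret_rotation(enable_validation=True)

assert TinyContext({"disable_secret_rotation": True, "addon_enabled": True}).get_disable_secret_rotation() is True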
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n # read the original value passed by the command\n disable_secret_rotation = self.raw_param.get(\"disable_secret_rotation\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--disable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return disable_secret_rotation",
"def _get_enable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n\n # read the original value passed by the command\n enable_secret_rotation = self.raw_param.get(\"enable_secret_rotation\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) is not None\n ):\n enable_secret_rotation = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--enable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return enable_secret_rotation",
"def get_enable_secret_rotation(self) -> bool:\n return self._get_enable_secret_rotation(enable_validation=True)",
"def rotate_webhook_secret(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"rotate_webhook_secret\")",
"def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n # read the original value passed by the command\n rotation_poll_interval = self.raw_param.get(\"rotation_poll_interval\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL) is not None\n ):\n rotation_poll_interval = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL)\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if rotation_poll_interval:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--rotation-poll-interval can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return rotation_poll_interval",
"def update_azure_keyvault_secrets_provider_addon_profile(\n self,\n azure_keyvault_secrets_provider_addon_profile: ManagedClusterAddonProfile,\n ) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n\n if self.context.get_disable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"false\"\n\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile",
"def secret(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"secret\")",
"def secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secret\")",
"def secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secret\")",
"def _get_secret_value(self):\n try:\n kwargs = {'SecretId': self.name}\n if self.stage is not None:\n kwargs['VersionStage'] = self.stage\n response = self.secretsmanager_client.get_secret_value(**kwargs)\n except ClientError as e:\n logger.error(f\"Unable to retrieve value for secret {self.name}. {type(e)}: {e}\")\n raise\n else:\n return response",
"def get_secret(project_name, secret_name):\n secrets = secretmanager.SecretManagerServiceClient()\n secret_value = (\n secrets.access_secret_version(\n \"projects/\" + project_name + \"/secrets/\" + secret_name + \"/versions/latest\"\n )\n .payload.data.decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )\n return secret_value",
"def _decrypt_secret(\n self, \n encryption_key: str,\n secret_list: List,\n secret_name: str\n ):\n f = Fernet(\n bytes(encryption_key, \"utf-8\")\n )\n secret=None\n if 'secrets' in secret_list:\n if secret_name in secret_list['secrets']:\n secret = f.decrypt(\n bytes(secret_list['secrets'][secret_name], \"utf-8\")\n ).decode('UTF-8')\n #self.log.log_success(\n # f'{secret_name} : {secret}'\n #)\n return secret",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def _get_secret_string(self):\n result = self._get_secret_value().get('SecretString')\n if result is None:\n raise ValueError(f\"There is no SecretString for named {self.name!r}.\")\n return result",
"def get_ssm_secret_value(parameter_name):\n\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.get_parameter\n return SSM.get_parameter(\n Name=parameter_name,\n WithDecryption=True\n ).get(\"Parameter\").get(\"Value\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverControllerEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverSideCarsEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIUnitySpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIPowerMaxSpecDriverSideCarsEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secret_key_ref(self) -> Optional['outputs.CSIIsilonSpecDriverCommonEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"async def jwt_secret() -> Optional[str]:\n if not jwt_secret_config:\n raise RuntimeError(\"jwt_secret_config not set in auth\")\n if hasattr(jwt_secret_config, \"get_secret_value\"):\n return jwt_secret_config.get_secret_value()\n else:\n return jwt_secret_config",
"def omit_secret(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"omit_secret\")",
"def omit_secret(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"omit_secret\")",
"def secret_key_ref(self) -> Optional['outputs.CSIIsilonSpecDriverNodeEnvsValueFromSecretKeyRef']:\n return pulumi.get(self, \"secret_key_ref\")",
"def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecretArgs']]]]:\n return pulumi.get(self, \"secrets\")",
"def omit_secret(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"omit_secret\")"
] |
[
"0.80781806",
"0.7530236",
"0.70143294",
"0.6596142",
"0.61500645",
"0.57170594",
"0.5615502",
"0.55860037",
"0.55860037",
"0.55594075",
"0.547852",
"0.5460882",
"0.5456694",
"0.5448126",
"0.5447635",
"0.54417706",
"0.5421276",
"0.54191613",
"0.54140186",
"0.54047495",
"0.5390607",
"0.53890103",
"0.53868496",
"0.5366614",
"0.53589636",
"0.5357352",
"0.5357352",
"0.5355638",
"0.53538126",
"0.53443176"
] |
0.76547563
|
1
|
Internal function to obtain the value of rotation_poll_interval. This function supports the option of enable_validation. When enabled, in update mode, if rotation_poll_interval is specified but the azure keyvault secrets provider addon is not enabled, an InvalidArgumentValueError will be raised.
|
def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:
# determine the value of constants
addon_consts = self.get_addon_consts()
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(
"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME"
)
CONST_ROTATION_POLL_INTERVAL = addon_consts.get(
"CONST_ROTATION_POLL_INTERVAL"
)
# read the original value passed by the command
rotation_poll_interval = self.raw_param.get("rotation_poll_interval")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.addon_profiles and
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
).config.get(CONST_ROTATION_POLL_INTERVAL) is not None
):
rotation_poll_interval = self.mc.addon_profiles.get(
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
).config.get(CONST_ROTATION_POLL_INTERVAL)
# this parameter does not need dynamic completion
# validation
if enable_validation:
if self.decorator_mode == DecoratorMode.UPDATE:
if rotation_poll_interval:
azure_keyvault_secrets_provider_enabled = (
self.mc and
self.mc.addon_profiles and
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled
)
if not azure_keyvault_secrets_provider_enabled:
raise InvalidArgumentValueError(
"--rotation-poll-interval can only be specified "
"when azure-keyvault-secrets-provider is enabled "
"Please use command 'az aks enable-addons' to enable it."
)
return rotation_poll_interval
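
A runnable sketch of the precedence rule implemented above: in create mode a value already stored in the managed cluster's addon config overrides the raw command parameter, while in update mode the raw parameter is used as-is. The function and key names below are illustrative stand-ins.

def resolve_rotation_poll_interval(raw_param, addon_config, create_mode):
    # raw command value first; in create mode the mc addon config wins
    value = raw_param.get("rotation_poll_interval")
    if create_mode and addon_config.get("rotationPollInterval") is not None:
        value = addon_config["rotationPollInterval"]
    return value

assert resolve_rotation_poll_interval({"rotation_poll_interval": "5m"}, {"rotationPollInterval": "2m"}, create_mode=True) == "2m"
assert resolve_rotation_poll_interval({"rotation_poll_interval": "5m"}, {}, create_mode=False) == "5m"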
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_rotation_poll_interval(self) -> Union[str, None]:\n return self._get_rotation_poll_interval(enable_validation=True)",
"def poll_interval(self):\n return self.opts.poll_interval",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def check_interval(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"check_interval\")",
"def get_interval_of_checking(self):\n return int(self.config['interval_of_checking'])",
"def evaluation_interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"evaluation_interval\")",
"def reminder_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def reminder_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def getcheckinterval(): # real signature unknown; restored from __doc__\n pass",
"def _get_polling(self, name):\n path_format_arguments = {\n \"scheduleName\": name,\n \"resourceGroupName\": self._resource_group_name,\n \"workspaceName\": self._workspace_name,\n }\n return AzureMLPolling(\n LROConfigurations.POLL_INTERVAL,\n path_format_arguments=path_format_arguments,\n )",
"def refresh_interval(self):\n return self._refresh_interval",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def interval(self):\n if self.keep_interval:\n return self._interval\n else:\n # Updating the interval and then returning the new value\n self._update_interval()\n # Setting the flag to keep the new interval until it is exceeded the next time\n self.keep_interval = True\n return self._interval",
"def get_smart_guard_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSmartGuardInterval', self.handle)",
"def connect_retry_interval(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"connect_retry_interval\")",
"def reminder_interval(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval\")",
"def _get_update_interval(self, data: T) -> timedelta:\n if self.expect_change_until > monotonic():\n return timedelta(seconds=5)\n\n return timedelta(seconds=30)",
"def get_time_sync_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetTimeSyncInterval', self.handle)",
"def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n # read the original value passed by the command\n disable_secret_rotation = self.raw_param.get(\"disable_secret_rotation\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--disable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return disable_secret_rotation",
"def get_monitor_interval(self):\n return self.conf['icmp_check_interval']",
"def _get_enable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n\n # read the original value passed by the command\n enable_secret_rotation = self.raw_param.get(\"enable_secret_rotation\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) is not None\n ):\n enable_secret_rotation = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--enable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return enable_secret_rotation",
"def interval(self) -> int:\n return pulumi.get(self, \"interval\")",
"def checkpoint_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"checkpoint_interval\")",
"def password_change_interval(self) -> str:\n return pulumi.get(self, \"password_change_interval\")",
"def get_config_validity(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetConfigValidity', self.handle)",
"def interval(self):\n return self._interval",
"def interval(self):\n return self._interval"
] |
[
"0.82278347",
"0.671248",
"0.6394673",
"0.6394673",
"0.59591895",
"0.5958116",
"0.5872276",
"0.5869967",
"0.5869967",
"0.5868385",
"0.57615155",
"0.5755643",
"0.5723402",
"0.5723402",
"0.56774837",
"0.56355",
"0.56190455",
"0.55804837",
"0.5576462",
"0.55636406",
"0.55400616",
"0.55241233",
"0.5519271",
"0.55074584",
"0.54738164",
"0.54580486",
"0.544051",
"0.5413355",
"0.5399597",
"0.5399597"
] |
0.83072454
|
0
|
Obtain the value of rotation_poll_interval. This function will verify the parameter by default. In update mode, if rotation_poll_interval is specified but the azure keyvault secrets provider addon is not enabled, an InvalidArgumentValueError will be raised.
|
def get_rotation_poll_interval(self) -> Union[str, None]:
return self._get_rotation_poll_interval(enable_validation=True)
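
Illustrative only: the validated getter is expected to raise when --rotation-poll-interval is passed while the addon is off in update mode. ValueError below stands in for azure-cli's InvalidArgumentValueError.

def validate_rotation_poll_interval(interval, addon_enabled):
    if interval and not addon_enabled:
        raise ValueError(
            "--rotation-poll-interval can only be specified "
            "when azure-keyvault-secrets-provider is enabled."
        )
    return interval

try:
    validate_rotation_poll_interval("2m", addon_enabled=False)
except ValueError as exc:
    print(exc)  # the update-mode failure path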
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n # read the original value passed by the command\n rotation_poll_interval = self.raw_param.get(\"rotation_poll_interval\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL) is not None\n ):\n rotation_poll_interval = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL)\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if rotation_poll_interval:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--rotation-poll-interval can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return rotation_poll_interval",
"def poll_interval(self):\n return self.opts.poll_interval",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def check_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"check_interval\")",
"def reminder_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def reminder_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def evaluation_interval(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"evaluation_interval\")",
"def check_interval(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"check_interval\")",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")",
"def getcheckinterval(): # real signature unknown; restored from __doc__\n pass",
"def reminder_interval(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"reminder_interval\")",
"def interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval\")",
"def get_interval_of_checking(self):\n return int(self.config['interval_of_checking'])",
"def rotation_period(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rotation_period\")",
"def connect_retry_interval(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"connect_retry_interval\")",
"def _get_polling(self, name):\n path_format_arguments = {\n \"scheduleName\": name,\n \"resourceGroupName\": self._resource_group_name,\n \"workspaceName\": self._workspace_name,\n }\n return AzureMLPolling(\n LROConfigurations.POLL_INTERVAL,\n path_format_arguments=path_format_arguments,\n )",
"def test_REFRESH_INTERVAL(self):\n self.assertIsInstance(constants.REFRESH_INTERVAL, int,\n \"constants.REFRESH_INTERVAL must be an integer.\")",
"def checkpoint_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"checkpoint_interval\")",
"def refresh_period_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"refresh_period_in_seconds\")",
"def password_change_interval(self) -> str:\n return pulumi.get(self, \"password_change_interval\")",
"def interval(self) -> Optional[str]:\n return pulumi.get(self, \"interval\")",
"def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_in_seconds\")",
"def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_in_seconds\")",
"def refresh_interval(self):\n return self._refresh_interval",
"def interval(self) -> int:\n return pulumi.get(self, \"interval\")",
"def _get_update_interval(self, data: T) -> timedelta:\n if self.expect_change_until > monotonic():\n return timedelta(seconds=5)\n\n return timedelta(seconds=30)",
"def get_smart_guard_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSmartGuardInterval', self.handle)",
"def get_user_check_interval(self):\n check_interval = None\n print(\"How many seconds between consequetive checks?:\")\n while not check_interval:\n try:\n check_interval = int(input())\n except ValueError:\n print(\"That doesn't look like a number. Try again please.\")\n continue\n return check_interval",
"def interval(self):\n if self.keep_interval:\n return self._interval\n else:\n # Updating the interval and then returning the new value\n self._update_interval()\n # Setting the flag to keep the new interval until it is exceeded the next time\n self.keep_interval = True\n return self._interval"
] |
[
"0.77896523",
"0.6479036",
"0.6469635",
"0.6469635",
"0.6127741",
"0.6127741",
"0.59140736",
"0.5876525",
"0.5846791",
"0.5846791",
"0.5748593",
"0.5729529",
"0.57221097",
"0.5681048",
"0.5676161",
"0.5573035",
"0.5565345",
"0.5528974",
"0.55278695",
"0.550364",
"0.54940975",
"0.54776084",
"0.5462481",
"0.5462481",
"0.5459793",
"0.5424381",
"0.5420957",
"0.5400684",
"0.5309058",
"0.5282393"
] |
0.80301034
|
0
|
Internal function to obtain the value of enable_aad. This function supports the option of enable_validation. When enabled, in create mode, if the value of enable_aad is True and any of aad_client_app_id, aad_server_app_id or aad_server_app_secret is assigned, a MutuallyExclusiveArgumentError will be raised. If the value of enable_aad is False and the value of enable_azure_rbac is True, a RequiredArgumentMissingError will be raised. In update mode, if enable_aad is specified and managed AAD has been enabled, an InvalidArgumentValueError will be raised.
|
def _get_enable_aad(self, enable_validation: bool = False) -> bool:
# read the original value passed by the command
enable_aad = self.raw_param.get("enable_aad")
# In create mode, try to read the property value corresponding to the parameter from the `mc` object.
if self.decorator_mode == DecoratorMode.CREATE:
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.managed is not None
):
enable_aad = self.mc.aad_profile.managed
# this parameter does not need dynamic completion
# validation
if enable_validation:
if self.decorator_mode == DecoratorMode.CREATE:
(
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
enable_validation=False
)
if enable_aad:
if any(
[
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
]
):
raise MutuallyExclusiveArgumentError(
"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
"--aad-server-app-secret"
)
if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):
raise RequiredArgumentMissingError(
"--enable-azure-rbac can only be used together with --enable-aad"
)
elif self.decorator_mode == DecoratorMode.UPDATE:
if enable_aad:
if check_is_managed_aad_cluster(self.mc):
raise InvalidArgumentValueError(
'Cannot specify "--enable-aad" if managed AAD is already enabled'
)
return enable_aad
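
A self-contained sketch of the create-mode checks above: --enable-aad excludes the legacy AAD app arguments, and --enable-azure-rbac requires --enable-aad. The exception type is a stand-in for the azure-cli error classes.

def validate_aad_args(enable_aad, client_app_id, server_app_id, server_app_secret, enable_azure_rbac):
    # mutual exclusion: managed AAD vs. the legacy app registration arguments
    if enable_aad and any([client_app_id, server_app_id, server_app_secret]):
        raise ValueError(
            "--enable-aad cannot be used together with --aad-client-app-id, "
            "--aad-server-app-id or --aad-server-app-secret"
        )
    # dependency: Azure RBAC requires managed AAD
    if not enable_aad and enable_azure_rbac:
        raise ValueError("--enable-azure-rbac can only be used together with --enable-aad")

validate_aad_args(True, None, None, None, enable_azure_rbac=True)  # passes
try:
    validate_aad_args(False, None, None, None, enable_azure_rbac=True)
except ValueError as exc:
    print(exc)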
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n # get aad profile from `mc`\n aad_profile = None\n if self.mc:\n aad_profile = self.mc.aad_profile\n\n # read the original value passed by the command\n aad_client_app_id = self.raw_param.get(\"aad_client_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.client_app_id is not None:\n aad_client_app_id = aad_profile.client_app_id\n\n # read the original value passed by the command\n aad_server_app_id = self.raw_param.get(\"aad_server_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_id is not None:\n aad_server_app_id = aad_profile.server_app_id\n\n # read the original value passed by the command\n aad_server_app_secret = self.raw_param.get(\"aad_server_app_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_secret is not None:\n aad_server_app_secret = aad_profile.server_app_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n enable_aad = self._get_enable_aad(enable_validation=False)\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n return aad_client_app_id, aad_server_app_id, aad_server_app_secret",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def _get_aad_tenant_id(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n aad_tenant_id = self.raw_param.get(\"aad_tenant_id\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.tenant_id is not None\n ):\n aad_tenant_id = self.mc.aad_profile.tenant_id\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return aad_tenant_id\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n if not read_from_mc and not self._get_enable_aad(\n enable_validation=False\n ):\n if aad_tenant_id is None and any(\n self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=False)\n ):\n profile = Profile(cli_ctx=self.cmd.cli_ctx)\n _, _, aad_tenant_id = profile.get_login_credentials()\n\n # validation\n if enable_validation:\n if aad_tenant_id:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-tenant-id\" if managed AAD is not enabled'\n )\n return aad_tenant_id",
"def _get_enable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_vpa = self.raw_param.get(\"enable_vpa\")\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_vpa and self._get_disable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return enable_vpa",
"def get_enable_azure_rbac(self) -> bool:\n\n return self._get_enable_azure_rbac(enable_validation=True)",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def _get_disable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_azure_rbac = self.raw_param.get(\"disable_azure_rbac\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_azure_rbac:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--disable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return disable_azure_rbac",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def _get_enable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_keda = self.raw_param.get(\"enable_keda\")\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"workload_auto_scaler_profile\") and # backward compatibility\n self.mc.workload_auto_scaler_profile and\n self.mc.workload_auto_scaler_profile.keda\n ):\n enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_keda and self._get_disable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return enable_keda",
"def get_enable_rbac(self) -> Union[bool, None]:\n return self._get_enable_rbac(enable_validation=True)",
"def get_aad_tenant_id(self) -> Union[str, None]:\n return self._get_aad_tenant_id(enable_validation=True)",
"def get_enable_vpa(self) -> bool:\n return self._get_enable_vpa(enable_validation=True)",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_windows_gmsa = self.raw_param.get(\"enable_windows_gmsa\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.windows_profile and\n hasattr(self.mc.windows_profile, \"gmsa_profile\") and # backward compatibility\n self.mc.windows_profile.gmsa_profile and\n self.mc.windows_profile.gmsa_profile.enabled is not None\n ):\n enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n (\n gmsa_dns_server,\n gmsa_root_domain_name,\n ) = self._get_gmsa_dns_server_and_root_domain_name(\n enable_validation=False\n )\n self.__validate_gmsa_options(\n enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()\n )\n return enable_windows_gmsa",
"def enable_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_rbac\")",
"def get_attach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n attach_acr = self.raw_param.get(\"attach_acr\")\n\n # this parameter does not need dynamic completion\n # validation\n if self.decorator_mode == DecoratorMode.CREATE and attach_acr:\n if self._get_enable_managed_identity(enable_validation=False):\n # Attach acr operation will be handled after the cluster is created\n if self.get_no_wait():\n raise MutuallyExclusiveArgumentError(\n \"When --attach-acr and --enable-managed-identity are both specified, \"\n \"--no-wait is not allowed, please wait until the whole operation succeeds.\"\n )\n else:\n # newly added check, check whether client_id exists before creating role assignment\n service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)\n if not service_principal:\n raise RequiredArgumentMissingError(\n \"No service principal provided to create the acrpull role assignment for acr.\"\n )\n return attach_acr",
"def _get_aad_admin_group_object_ids(self, enable_validation: bool = False) -> Union[List[str], None]:\n # read the original value passed by the command\n aad_admin_group_object_ids = self.raw_param.get(\"aad_admin_group_object_ids\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.admin_group_object_i_ds is not None\n ):\n aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds\n read_from_mc = True\n\n # keep None as None, but empty string (\"\") to empty list ([])\n if not read_from_mc and aad_admin_group_object_ids is not None:\n aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []\n\n # validation\n if enable_validation:\n if aad_admin_group_object_ids:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-admin-group-object-ids\" if managed AAD is not enabled'\n )\n\n return aad_admin_group_object_ids",
"def _get_enable_azure_monitor_metrics(self, enable_validation: bool = False) -> bool:\n # print(\"_get_enable_azure_monitor_metrics being called...\")\n # Read the original value passed by the command.\n enable_azure_monitor_metrics = self.raw_param.get(\"enable_azure_monitor_metrics\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"azure_monitor_profile\") and\n self.mc.azure_monitor_profile and\n self.mc.azure_monitor_profile.metrics\n ):\n enable_azure_monitor_metrics = self.mc.azure_monitor_profile.metrics.enabled\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_azure_monitor_metrics and self._get_disable_azure_monitor_metrics(False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-monitor-metrics and --disable-azure-monitor-metrics at the same time\"\n )\n if enable_azure_monitor_metrics and not check_is_msi_cluster(self.mc):\n raise RequiredArgumentMissingError(\n \"--enable-azure-monitor-metrics can only be specified for clusters with managed identity enabled\"\n )\n return enable_azure_monitor_metrics",
"def _get_disable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n disable_rbac = self.raw_param.get(\"disable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n disable_rbac = not self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n if disable_rbac and self._get_enable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return disable_rbac",
"def aof_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aof_enabled\")",
"def aad_profile(self) -> Optional[pulumi.Input['AADProfileArgs']]:\n return pulumi.get(self, \"aad_profile\")",
"def _get_enable_addons(self, enable_validation: bool = False) -> List[str]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n valid_addon_keys = addon_consts.get(\"ADDONS\").keys()\n\n # read the original value passed by the command\n enable_addons = self.raw_param.get(\"enable_addons\")\n\n # normalize\n enable_addons = enable_addons.split(',') if enable_addons else []\n\n # validation\n if enable_validation:\n # check duplicate addons\n duplicate_addons_set = {\n x for x in enable_addons if enable_addons.count(x) >= 2\n }\n if len(duplicate_addons_set) != 0:\n raise InvalidArgumentValueError(\n \"Duplicate addon{} '{}' found in option --enable-addons.\".format(\n \"s\" if len(duplicate_addons_set) > 1 else \"\",\n \",\".join(duplicate_addons_set),\n )\n )\n\n # check unrecognized addons\n enable_addons_set = set(enable_addons)\n invalid_addons_set = enable_addons_set.difference(valid_addon_keys)\n if len(invalid_addons_set) != 0:\n raise InvalidArgumentValueError(\n \"'{}' {} not recognized by the --enable-addons argument.\".format(\n \",\".join(invalid_addons_set),\n \"are\" if len(invalid_addons_set) > 1 else \"is\",\n )\n )\n\n # check monitoring/workspace_resource_id\n workspace_resource_id = self._get_workspace_resource_id(read_only=True)\n if \"monitoring\" not in enable_addons and workspace_resource_id:\n raise RequiredArgumentMissingError(\n '\"--workspace-resource-id\" requires \"--enable-addons monitoring\".')\n\n # check virtual node/aci_subnet_name/vnet_subnet_id\n # Note: The external parameters involved in the validation are not verified in their own getters.\n aci_subnet_name = self.get_aci_subnet_name()\n vnet_subnet_id = self.get_vnet_subnet_id()\n if \"virtual-node\" in enable_addons and not (aci_subnet_name and vnet_subnet_id):\n raise RequiredArgumentMissingError(\n '\"--enable-addons virtual-node\" requires \"--aci-subnet-name\" and \"--vnet-subnet-id\".')\n return enable_addons",
"def aad_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_client_id\")",
"def _get_enable_managed_identity(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_managed_identity = self.raw_param.get(\"enable_managed_identity\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.identity:\n enable_managed_identity = check_is_msi_cluster(self.mc)\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return enable_managed_identity\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n service_principal,\n client_secret,\n ) = self._get_service_principal_and_client_secret(read_only=True)\n if not read_from_mc and service_principal and client_secret:\n enable_managed_identity = False\n\n # validation\n if enable_validation:\n if not enable_managed_identity and self._get_assign_identity(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--assign-identity can only be specified when --enable-managed-identity is specified\"\n )\n return enable_managed_identity",
"def fine_grained_authorization_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"fine_grained_authorization_enabled\")",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None"
] |
[
"0.7986423",
"0.69592005",
"0.62913746",
"0.60939187",
"0.60804445",
"0.60697037",
"0.59247243",
"0.5906257",
"0.5906257",
"0.56612194",
"0.55931604",
"0.5524499",
"0.5514272",
"0.5325802",
"0.5310161",
"0.52394617",
"0.5164164",
"0.51582897",
"0.51489145",
"0.5142212",
"0.5037039",
"0.50163305",
"0.49941725",
"0.49304312",
"0.48489955",
"0.4809782",
"0.47738454",
"0.47608292",
"0.4755646",
"0.4755646"
] |
0.83274245
|
0
|
Obtain the value of enable_aad. This function will verify the parameter by default. In create mode, if the value of enable_aad is True and any of aad_client_app_id, aad_server_app_id or aad_server_app_secret is assigned, a MutuallyExclusiveArgumentError will be raised. If the value of enable_aad is False and the value of enable_azure_rbac is True, a RequiredArgumentMissingError will be raised. In update mode, if enable_aad is specified and managed AAD has been enabled, an InvalidArgumentValueError will be raised.
|
def get_enable_aad(self) -> bool:
return self._get_enable_aad(enable_validation=True)
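
Illustrative stand-in for the update-mode guard exercised by the validated getter: re-enabling managed AAD on a cluster that already has it should fail.

def check_enable_aad_update(enable_aad, managed_aad_already_enabled):
    if enable_aad and managed_aad_already_enabled:
        raise ValueError('Cannot specify "--enable-aad" if managed AAD is already enabled')
    return enable_aad

print(check_enable_aad_update(True, managed_aad_already_enabled=False))  # True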
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n # get aad profile from `mc`\n aad_profile = None\n if self.mc:\n aad_profile = self.mc.aad_profile\n\n # read the original value passed by the command\n aad_client_app_id = self.raw_param.get(\"aad_client_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.client_app_id is not None:\n aad_client_app_id = aad_profile.client_app_id\n\n # read the original value passed by the command\n aad_server_app_id = self.raw_param.get(\"aad_server_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_id is not None:\n aad_server_app_id = aad_profile.server_app_id\n\n # read the original value passed by the command\n aad_server_app_secret = self.raw_param.get(\"aad_server_app_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_secret is not None:\n aad_server_app_secret = aad_profile.server_app_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n enable_aad = self._get_enable_aad(enable_validation=False)\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n return aad_client_app_id, aad_server_app_id, aad_server_app_secret",
"def get_enable_azure_rbac(self) -> bool:\n\n return self._get_enable_azure_rbac(enable_validation=True)",
"def _get_aad_tenant_id(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n aad_tenant_id = self.raw_param.get(\"aad_tenant_id\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.tenant_id is not None\n ):\n aad_tenant_id = self.mc.aad_profile.tenant_id\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return aad_tenant_id\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n if not read_from_mc and not self._get_enable_aad(\n enable_validation=False\n ):\n if aad_tenant_id is None and any(\n self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=False)\n ):\n profile = Profile(cli_ctx=self.cmd.cli_ctx)\n _, _, aad_tenant_id = profile.get_login_credentials()\n\n # validation\n if enable_validation:\n if aad_tenant_id:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-tenant-id\" if managed AAD is not enabled'\n )\n return aad_tenant_id",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def _get_enable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_vpa = self.raw_param.get(\"enable_vpa\")\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_vpa and self._get_disable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return enable_vpa",
"def enable_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_rbac\")",
"def get_enable_rbac(self) -> Union[bool, None]:\n return self._get_enable_rbac(enable_validation=True)",
"def _get_disable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_azure_rbac = self.raw_param.get(\"disable_azure_rbac\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_azure_rbac:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--disable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return disable_azure_rbac",
"def get_attach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n attach_acr = self.raw_param.get(\"attach_acr\")\n\n # this parameter does not need dynamic completion\n # validation\n if self.decorator_mode == DecoratorMode.CREATE and attach_acr:\n if self._get_enable_managed_identity(enable_validation=False):\n # Attach acr operation will be handled after the cluster is created\n if self.get_no_wait():\n raise MutuallyExclusiveArgumentError(\n \"When --attach-acr and --enable-managed-identity are both specified, \"\n \"--no-wait is not allowed, please wait until the whole operation succeeds.\"\n )\n else:\n # newly added check, check whether client_id exists before creating role assignment\n service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)\n if not service_principal:\n raise RequiredArgumentMissingError(\n \"No service principal provided to create the acrpull role assignment for acr.\"\n )\n return attach_acr",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def get_aad_tenant_id(self) -> Union[str, None]:\n return self._get_aad_tenant_id(enable_validation=True)",
"def aof_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"aof_enabled\")",
"def fine_grained_authorization_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"fine_grained_authorization_enabled\")",
"def get_enable_vpa(self) -> bool:\n return self._get_enable_vpa(enable_validation=True)",
"def _get_enable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n enable_keda = self.raw_param.get(\"enable_keda\")\n\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"workload_auto_scaler_profile\") and # backward compatibility\n self.mc.workload_auto_scaler_profile and\n self.mc.workload_auto_scaler_profile.keda\n ):\n enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled\n\n # This parameter does not need dynamic completion.\n if enable_validation:\n if enable_keda and self._get_disable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return enable_keda",
"def enable_rbac(self) -> bool:\n return pulumi.get(self, \"enable_rbac\")",
"def aad_profile(self) -> Optional[pulumi.Input['AADProfileArgs']]:\n return pulumi.get(self, \"aad_profile\")",
"def aad_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_client_id\")",
"def enable_authentication(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_authentication\")",
"def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_keyvault_kms = self.raw_param.get(\"enable_azure_keyvault_kms\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"security_profile\") and # backward compatibility\n self.mc.security_profile and\n self.mc.security_profile.azure_key_vault_kms\n ):\n enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)):\n raise RequiredArgumentMissingError(\n 'You must set \"--enable-azure-keyvault-kms\" and \"--azure-keyvault-kms-key-id\" at the same time.'\n )\n\n return enable_azure_keyvault_kms",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def app_enable(resource, appkernel, enable=True, dry_run=False):\n import argparse\n log.info((\"Enabling \" if enable else \"Disabling \") +\n (\"%s\" % resource if appkernel is None else\"%s on %s\" % (appkernel, resource) ))\n if enable:\n return on_parsed(argparse.Namespace(resource=resource, application=appkernel))\n else:\n return off_parsed(argparse.Namespace(resource=resource, application=appkernel))",
"def enable_alts(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_alts\")",
"def aad_tenant_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_tenant_id\")",
"def _get_aad_admin_group_object_ids(self, enable_validation: bool = False) -> Union[List[str], None]:\n # read the original value passed by the command\n aad_admin_group_object_ids = self.raw_param.get(\"aad_admin_group_object_ids\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.admin_group_object_i_ds is not None\n ):\n aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds\n read_from_mc = True\n\n # keep None as None, but empty string (\"\") to empty list ([])\n if not read_from_mc and aad_admin_group_object_ids is not None:\n aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []\n\n # validation\n if enable_validation:\n if aad_admin_group_object_ids:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-admin-group-object-ids\" if managed AAD is not enabled'\n )\n\n return aad_admin_group_object_ids",
"def _get_disable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n disable_rbac = self.raw_param.get(\"disable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n disable_rbac = not self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n if disable_rbac and self._get_enable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return disable_rbac"
] |
[
"0.8022672",
"0.668778",
"0.6254986",
"0.6254986",
"0.5998548",
"0.58531916",
"0.5745483",
"0.5719853",
"0.5707755",
"0.553196",
"0.5460657",
"0.53346",
"0.5292276",
"0.52745384",
"0.52134436",
"0.5168049",
"0.50503254",
"0.50189227",
"0.49438113",
"0.49048287",
"0.49006552",
"0.48642346",
"0.48150846",
"0.481313",
"0.4764113",
"0.47563827",
"0.47314855",
"0.47014973",
"0.47014248",
"0.466044"
] |
0.77677137
|
1
|
Internal function to obtain the value of aad_client_app_id, aad_server_app_id and aad_server_app_secret. This function supports the option of enable_validation. When enabled, if the value of enable_aad is True and any of aad_client_app_id, aad_server_app_id or aad_server_app_secret is assigned, a MutuallyExclusiveArgumentError will be raised.
|
def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
    self, enable_validation: bool = False
) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:
    # get aad profile from `mc`
    aad_profile = None
    if self.mc:
        aad_profile = self.mc.aad_profile

    # read the original value passed by the command
    aad_client_app_id = self.raw_param.get("aad_client_app_id")
    # try to read the property value corresponding to the parameter from the `mc` object
    if aad_profile and aad_profile.client_app_id is not None:
        aad_client_app_id = aad_profile.client_app_id

    # read the original value passed by the command
    aad_server_app_id = self.raw_param.get("aad_server_app_id")
    # try to read the property value corresponding to the parameter from the `mc` object
    if aad_profile and aad_profile.server_app_id is not None:
        aad_server_app_id = aad_profile.server_app_id

    # read the original value passed by the command
    aad_server_app_secret = self.raw_param.get("aad_server_app_secret")
    # try to read the property value corresponding to the parameter from the `mc` object
    if aad_profile and aad_profile.server_app_secret is not None:
        aad_server_app_secret = aad_profile.server_app_secret

    # these parameters do not need dynamic completion

    # validation
    if enable_validation:
        enable_aad = self._get_enable_aad(enable_validation=False)
        if enable_aad:
            if any(
                [
                    aad_client_app_id,
                    aad_server_app_id,
                    aad_server_app_secret,
                ]
            ):
                raise MutuallyExclusiveArgumentError(
                    "--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
                    "--aad-server-app-secret"
                )
    return aad_client_app_id, aad_server_app_id, aad_server_app_secret
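
The getter above applies one precedence rule three times: the raw command-line value is only a fallback, and any value already present on the managed cluster's aad_profile wins. Below is a minimal runnable sketch of that rule, with SimpleNamespace standing in for the real AKS profile model; all names in the sketch are illustrative, not part of this dataset row.

from types import SimpleNamespace
from typing import Optional

def resolve_client_app_id(raw_param: dict, aad_profile) -> Optional[str]:
    # start from the value passed on the command line
    value = raw_param.get("aad_client_app_id")
    # an existing value on the cluster's aad profile takes precedence
    if aad_profile and aad_profile.client_app_id is not None:
        value = aad_profile.client_app_id
    return value

profile = SimpleNamespace(client_app_id="id-from-mc")
print(resolve_client_app_id({"aad_client_app_id": "id-from-cli"}, profile))  # id-from-mc
print(resolve_client_app_id({"aad_client_app_id": "id-from-cli"}, None))     # id-from-cli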
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self,\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n return self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=True)",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def aad_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_client_id\")",
"def _get_aad_tenant_id(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n aad_tenant_id = self.raw_param.get(\"aad_tenant_id\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.tenant_id is not None\n ):\n aad_tenant_id = self.mc.aad_profile.tenant_id\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return aad_tenant_id\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n if not read_from_mc and not self._get_enable_aad(\n enable_validation=False\n ):\n if aad_tenant_id is None and any(\n self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=False)\n ):\n profile = Profile(cli_ctx=self.cmd.cli_ctx)\n _, _, aad_tenant_id = profile.get_login_credentials()\n\n # validation\n if enable_validation:\n if aad_tenant_id:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-tenant-id\" if managed AAD is not enabled'\n )\n return aad_tenant_id",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def authorized_gae_applications(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:\n return pulumi.get(self, \"authorized_gae_applications\")",
"def __ValidateAppId(self, app_id):\n assert app_id\n if not self.__trusted and app_id != self.project_id:\n raise datastore_errors.BadRequestError(\n 'app %s cannot access app %s\\'s data' % (self.project_id, app_id))",
"def _get_service_principal_and_client_secret(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Tuple[Union[str, None], Union[str, None]]:\n # service_principal\n # read the original value passed by the command\n service_principal = self.raw_param.get(\"service_principal\")\n # try to read the property value corresponding to the parameter from the `mc` object\n sp_read_from_mc = False\n if (\n self.mc and\n self.mc.service_principal_profile and\n self.mc.service_principal_profile.client_id is not None\n ):\n service_principal = self.mc.service_principal_profile.client_id\n sp_read_from_mc = True\n\n # client_secret\n # read the original value passed by the command\n client_secret = self.raw_param.get(\"client_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n secret_read_from_mc = False\n if (\n self.mc and\n self.mc.service_principal_profile and\n self.mc.service_principal_profile.secret is not None\n ):\n client_secret = self.mc.service_principal_profile.secret\n secret_read_from_mc = True\n\n # consistent check\n if sp_read_from_mc != secret_read_from_mc:\n raise CLIInternalError(\n \"Inconsistent state detected, one of sp and secret is read from the `mc` object.\"\n )\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return service_principal, client_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n # only one of service_principal and client_secret is provided, not both\n if (service_principal or client_secret) and not (service_principal and client_secret):\n raise RequiredArgumentMissingError(\n \"Please provide both --service-principal and --client-secret to use sp as the cluster identity. \"\n \"An sp can be created using the 'az ad sp create-for-rbac' command.\"\n )\n return service_principal, client_secret",
"def _extract_external_aad(self, message, request_id, local_is_sender: bool) -> bytes:\n # If any option were actually Class I, it would be something like\n #\n # the_options = pick some of(message)\n # class_i_options = Message(the_options).opt.encode()\n\n oscore_version = 1\n class_i_options = b\"\"\n if request_id.request_hash is not None:\n class_i_options = Message(request_hash=request_id.request_hash).opt.encode()\n\n algorithms = [self.alg_aead.value]\n if self.external_aad_is_group:\n algorithms.append(self.alg_signature_enc.value)\n algorithms.append(self.alg_signature.value)\n algorithms.append(self.alg_pairwise_key_agreement.value)\n\n external_aad = [\n oscore_version,\n algorithms,\n request_id.kid,\n request_id.partial_iv,\n class_i_options,\n ]\n\n if self.external_aad_is_group:\n # FIXME: We may need to carry this over in the request_id when\n # observation span group rekeyings\n external_aad.append(self.id_context)\n\n assert message.opt.object_security is not None\n external_aad.append(message.opt.object_security)\n\n if local_is_sender:\n external_aad.append(self.sender_auth_cred)\n else:\n external_aad.append(self.recipient_auth_cred)\n external_aad.append(self.group_manager_cred)\n\n external_aad = cbor.dumps(external_aad)\n\n return external_aad",
"def get_service_principal_and_client_secret(\n self\n ) -> Tuple[Union[str, None], Union[str, None]]:\n return self._get_service_principal_and_client_secret(enable_validation=True)",
"def _valid_app_ids(app_ids):\n for app_id in app_ids:\n try:\n app_data = KNACK_CREDENTIALS[app_id]\n\n except KeyError:\n return False\n\n return True",
"def get_aad_tenant_id(self) -> Union[str, None]:\n return self._get_aad_tenant_id(enable_validation=True)",
"def acs_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"acs_secret\")",
"def app_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_secret\")",
"def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")",
"def set_client_id_and_secret(request):\n if request.method == 'POST':\n form = ClientIDAndSecretForm(request.POST)\n if form.is_valid():\n client_id = form.cleaned_data['client_id']\n client_secret = form.cleaned_data['client_secret']\n additional_client_ids = form.cleaned_data['additional_client_ids']\n whitelisted_emails = form.cleaned_data['whitelisted_emails']\n logging.info('Adding client_id: %s' % client_id)\n auth_utils.SecretKey.set_config(client_id, client_secret,\n additional_client_ids,\n whitelisted_emails)\n else:\n logging.info('Form is invalid')\n return HttpResponseRedirect(reverse(set_client_id_and_secret))\n else:\n client_id, client_secret, additional_client_ids, whitelisted_emails = \\\n auth_utils.SecretKey.get_config()\n form = ClientIDAndSecretForm(initial={\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'additional_client_ids': additional_client_ids,\n 'whitelisted_emails': whitelisted_emails})\n return respond(request, 'set_client_id_and_secret.html', {'form': form})",
"def authorized_gae_applications(self) -> Sequence[str]:\n warnings.warn(\"\"\"The App Engine app IDs that can access this instance. (Deprecated) Applied to First Generation instances only.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"authorized_gae_applications is deprecated: The App Engine app IDs that can access this instance. (Deprecated) Applied to First Generation instances only.\"\"\")\n\n return pulumi.get(self, \"authorized_gae_applications\")",
"def set_client_id_and_secret(request):\n if request.method == 'POST':\n form = ClientIDAndSecretForm(request.POST)\n if form.is_valid():\n client_id = form.cleaned_data['client_id']\n client_secret = form.cleaned_data['client_secret']\n additional_client_ids = form.cleaned_data['additional_client_ids']\n auth_utils.SecretKey.set_config(client_id, client_secret,\n additional_client_ids)\n return HttpResponseRedirect(reverse(set_client_id_and_secret))\n else:\n form = ClientIDAndSecretForm()\n return respond(request, 'set_client_id_and_secret.html', {'form': form})",
"def server_app_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_secret\")",
"def check_api_keys(self, request):\n app_id, api_obj = request.META.get(\"HTTP_APP_ID\"), None\n api_secret_key = request.META.get(\"HTTP_API_SECRET_KEY\")\n if app_id and api_secret_key:\n # validate app_id and api_secret_key\n app_id_bool = self._validate_app_id(app_id)\n if not app_id_bool:\n return False, self.app_id_message\n api_secret_key_bool = self._validate_api_secret_key(api_secret_key)\n if not api_secret_key:\n return False, self.api_secret_key_message\n try:\n api_obj = ApiApp.objects.get(app_id=app_id, api_secret_key=api_secret_key, active=True)\n if api_obj:\n self.app(request, api_obj)\n return True, ''\n except ApiApp.DoesNotExist:\n self.app(request, api_obj)\n return False, self.message\n else:\n self.app(request, api_obj)\n return False, self.message",
"def validate_app(logger, base_url, group_id, token, app_id):\n app_response = apps.get_app(base_url, group_id, token, app_id)\n if not app_response:\n return None\n\n app_dict = common.convert_response(app_response)\n if not app_dict:\n return None\n\n logger.info(\"App: {}\".format(app_dict))\n return app_dict",
"def _get_api_server_authorized_ip_ranges(self, enable_validation: bool = False) -> List[str]:\n # read the original value passed by the command\n api_server_authorized_ip_ranges = self.raw_param.get(\n \"api_server_authorized_ip_ranges\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n read_from_mc = False\n if (\n self.mc and\n self.mc.api_server_access_profile and\n self.mc.api_server_access_profile.authorized_ip_ranges is not None\n ):\n api_server_authorized_ip_ranges = (\n self.mc.api_server_access_profile.authorized_ip_ranges\n )\n read_from_mc = True\n\n # normalize\n if not read_from_mc:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n elif self.decorator_mode == DecoratorMode.UPDATE:\n # normalize, keep None as None\n if api_server_authorized_ip_ranges is not None:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n if api_server_authorized_ip_ranges:\n if (\n safe_lower(self._get_load_balancer_sku(enable_validation=False)) ==\n CONST_LOAD_BALANCER_SKU_BASIC\n ):\n raise InvalidArgumentValueError(\n \"--api-server-authorized-ip-ranges can only be used with standard load balancer\"\n )\n if self._get_enable_private_cluster(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if api_server_authorized_ip_ranges:\n if check_is_private_cluster(self.mc):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n return api_server_authorized_ip_ranges",
"def sign_in_audience(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sign_in_audience\")",
"def sign_in_audience(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sign_in_audience\")",
"def msa_app_tenant_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"msa_app_tenant_id\")",
"def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]",
"def get_appgw_id(self) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(\"CONST_INGRESS_APPGW_ADDON_NAME\")\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = addon_consts.get(\"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\")\n\n # read the original value passed by the command\n appgw_id = self.raw_param.get(\"appgw_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None\n ):\n appgw_id = self.mc.addon_profiles.get(\n CONST_INGRESS_APPGW_ADDON_NAME\n ).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return appgw_id",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def get_config() -> (int, Optional[AppConfig]):\r\n es_host = os.environ.get(\"ES_HOST\", \"\")\r\n if es_host == \"\":\r\n logging.error(\"empty env value for ES Host\")\r\n return 1, None\r\n\r\n es_port_str = os.environ.get(\"ES_PORT\", \"\")\r\n if es_port_str == \"\":\r\n logging.error(\"empty env value for ES PORT\")\r\n return 1, None\r\n try:\r\n es_port = int(es_port_str)\r\n except ValueError as ve:\r\n logging.exception(\r\n \"failed to convert es_port env var into int: {}\".format(es_port), exc_info=ve)\r\n return 1, None\r\n\r\n es_index = os.environ.get(\"ES_INDEX\", \"\")\r\n if es_index == \"\":\r\n logging.error(\"empty env value for ES INDEX\")\r\n return 1, None\r\n\r\n doc_tag = os.environ.get(\"DOC_TAG\", \"\")\r\n if doc_tag == \"\":\r\n logging.error(\"empty evn value for DOC_TAG\")\r\n return 1, None\r\n\r\n interval_str = os.environ.get(\"PUB_INTVL\", \"\")\r\n if doc_tag == \"\":\r\n logging.error(\"empty env value for PUB_INTVL\")\r\n return 1, None\r\n try:\r\n interval = int(interval_str)\r\n except ValueError as ve:\r\n logging.exception(\"invalid value for PUB_INTVL: {}\".format(interval_str),\r\n exc_info=ve)\r\n return 1, None\r\n\r\n gpio_pin_str = os.environ.get(\"GPIO_PIN\", \"\")\r\n if gpio_pin_str == \"\":\r\n logging.error(\"empty env value for GPIO_PIN\")\r\n return 1, None\r\n try:\r\n gpio_pin = int(gpio_pin_str)\r\n except ValueError as ve:\r\n logging.exception(\r\n \"failed to convert gpio pin env var to int: {}\".format(gpio_pin_str),\r\n exc_info=ve)\r\n return 1, None\r\n\r\n app_cfg = AppConfig(es_host=es_host,\r\n es_port=es_port,\r\n es_index=es_index,\r\n doc_tag=doc_tag,\r\n pub_intvl=interval,\r\n gpio_pin=gpio_pin,\r\n sensor=SENSOR)\r\n return 0, app_cfg",
"def validate_aud(self):\n aud_option = self.options.get('aud')\n aud = self.get('aud')\n if not aud_option or not aud:\n return\n\n aud_values = aud_option.get('values')\n if not aud_values:\n aud_value = aud_option.get('value')\n if aud_value:\n aud_values = [aud_value]\n\n if not aud_values:\n return\n\n if isinstance(self['aud'], list):\n aud_list = self['aud']\n else:\n aud_list = [self['aud']]\n\n if not any([v in aud_list for v in aud_values]):\n raise InvalidClaimError('aud')"
] |
[
"0.75942034",
"0.6455463",
"0.5557624",
"0.5554388",
"0.52089024",
"0.51852506",
"0.5142165",
"0.50324345",
"0.50048953",
"0.4997141",
"0.4969018",
"0.49064273",
"0.48956442",
"0.48653114",
"0.4811225",
"0.479971",
"0.47796237",
"0.47786278",
"0.47621787",
"0.4703882",
"0.46542335",
"0.4640764",
"0.46300936",
"0.46300936",
"0.46296123",
"0.4596634",
"0.4580235",
"0.4576059",
"0.45617384",
"0.45587188"
] |
0.8070726
|
0
|
Obtain the value of aad_client_app_id, aad_server_app_id and aad_server_app_secret. This function will verify the parameters by default. If the value of enable_aad is True and any of aad_client_app_id, aad_server_app_id or aad_server_app_secret is assigned, a MutuallyExclusiveArgumentError will be raised.
|
def get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
    self,
) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:
    return self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=True)
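
This wrapper illustrates the public/internal getter pattern used throughout these decorator methods: the public get_* method always validates, while cross-parameter checks call the underlying _get_* with enable_validation=False so that resolving one parameter never raises while a sibling parameter is still being read. A hedged, self-contained sketch of the pattern follows; the class and parameter names are hypothetical.

class ParamResolver:
    def __init__(self, raw_param: dict):
        self.raw_param = raw_param

    def _get_enable_feature(self, enable_validation: bool = False) -> bool:
        enable_feature = bool(self.raw_param.get("enable_feature"))
        # cross-check calls the sibling getter with validation off (its default)
        if enable_validation and enable_feature and self._get_disable_feature():
            raise ValueError("Cannot specify --enable-feature and --disable-feature at the same time.")
        return enable_feature

    def _get_disable_feature(self, enable_validation: bool = False) -> bool:
        disable_feature = bool(self.raw_param.get("disable_feature"))
        if enable_validation and disable_feature and self._get_enable_feature():
            raise ValueError("Cannot specify --enable-feature and --disable-feature at the same time.")
        return disable_feature

    def get_enable_feature(self) -> bool:
        # the public surface always validates
        return self._get_enable_feature(enable_validation=True)

Because each side consults the other with validation disabled, the mutual check never recurses.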
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n self, enable_validation: bool = False\n ) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:\n # get aad profile from `mc`\n aad_profile = None\n if self.mc:\n aad_profile = self.mc.aad_profile\n\n # read the original value passed by the command\n aad_client_app_id = self.raw_param.get(\"aad_client_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.client_app_id is not None:\n aad_client_app_id = aad_profile.client_app_id\n\n # read the original value passed by the command\n aad_server_app_id = self.raw_param.get(\"aad_server_app_id\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_id is not None:\n aad_server_app_id = aad_profile.server_app_id\n\n # read the original value passed by the command\n aad_server_app_secret = self.raw_param.get(\"aad_server_app_secret\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if aad_profile and aad_profile.server_app_secret is not None:\n aad_server_app_secret = aad_profile.server_app_secret\n\n # these parameters do not need dynamic completion\n\n # validation\n if enable_validation:\n enable_aad = self._get_enable_aad(enable_validation=False)\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n return aad_client_app_id, aad_server_app_id, aad_server_app_secret",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def aad_client_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_client_id\")",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def acs_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"acs_secret\")",
"def authorized_gae_applications(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:\n return pulumi.get(self, \"authorized_gae_applications\")",
"def _get_aad_tenant_id(\n self, enable_validation: bool = False, read_only: bool = False\n ) -> Union[str, None]:\n # read the original value passed by the command\n aad_tenant_id = self.raw_param.get(\"aad_tenant_id\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.tenant_id is not None\n ):\n aad_tenant_id = self.mc.aad_profile.tenant_id\n read_from_mc = True\n\n # skip dynamic completion & validation if option read_only is specified\n if read_only:\n return aad_tenant_id\n\n # dynamic completion for create mode only\n if self.decorator_mode == DecoratorMode.CREATE:\n if not read_from_mc and not self._get_enable_aad(\n enable_validation=False\n ):\n if aad_tenant_id is None and any(\n self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(enable_validation=False)\n ):\n profile = Profile(cli_ctx=self.cmd.cli_ctx)\n _, _, aad_tenant_id = profile.get_login_credentials()\n\n # validation\n if enable_validation:\n if aad_tenant_id:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-tenant-id\" if managed AAD is not enabled'\n )\n return aad_tenant_id",
"def app_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_secret\")",
"def get_service_principal_and_client_secret(\n self\n ) -> Tuple[Union[str, None], Union[str, None]]:\n return self._get_service_principal_and_client_secret(enable_validation=True)",
"def get_aad_tenant_id(self) -> Union[str, None]:\n return self._get_aad_tenant_id(enable_validation=True)",
"def test_create_application_credential(self):\n app_cred = self.create_application_credential()\n\n # Check that the secret appears in the create response\n secret = app_cred['secret']\n\n # Check that the secret is not retrievable after initial create\n app_cred = self.non_admin_app_creds_client.show_application_credential(\n user_id=self.user_id,\n application_credential_id=app_cred['id']\n )['application_credential']\n self.assertNotIn('secret', app_cred)\n\n # Check that the application credential is functional\n _, resp = self.non_admin_token.get_token(\n app_cred_id=app_cred['id'],\n app_cred_secret=secret,\n auth_data=True\n )\n self.assertEqual(resp['project']['id'], self.project_id)",
"def set_client_id_and_secret(request):\n if request.method == 'POST':\n form = ClientIDAndSecretForm(request.POST)\n if form.is_valid():\n client_id = form.cleaned_data['client_id']\n client_secret = form.cleaned_data['client_secret']\n additional_client_ids = form.cleaned_data['additional_client_ids']\n auth_utils.SecretKey.set_config(client_id, client_secret,\n additional_client_ids)\n return HttpResponseRedirect(reverse(set_client_id_and_secret))\n else:\n form = ClientIDAndSecretForm()\n return respond(request, 'set_client_id_and_secret.html', {'form': form})",
"def sign_in_audience(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sign_in_audience\")",
"def sign_in_audience(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sign_in_audience\")",
"def _valid_app_ids(app_ids):\n for app_id in app_ids:\n try:\n app_data = KNACK_CREDENTIALS[app_id]\n\n except KeyError:\n return False\n\n return True",
"def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]",
"def server_app_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_app_secret\")",
"def set_client_id_and_secret(request):\n if request.method == 'POST':\n form = ClientIDAndSecretForm(request.POST)\n if form.is_valid():\n client_id = form.cleaned_data['client_id']\n client_secret = form.cleaned_data['client_secret']\n additional_client_ids = form.cleaned_data['additional_client_ids']\n whitelisted_emails = form.cleaned_data['whitelisted_emails']\n logging.info('Adding client_id: %s' % client_id)\n auth_utils.SecretKey.set_config(client_id, client_secret,\n additional_client_ids,\n whitelisted_emails)\n else:\n logging.info('Form is invalid')\n return HttpResponseRedirect(reverse(set_client_id_and_secret))\n else:\n client_id, client_secret, additional_client_ids, whitelisted_emails = \\\n auth_utils.SecretKey.get_config()\n form = ClientIDAndSecretForm(initial={\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'additional_client_ids': additional_client_ids,\n 'whitelisted_emails': whitelisted_emails})\n return respond(request, 'set_client_id_and_secret.html', {'form': form})",
"def __ValidateAppId(self, app_id):\n assert app_id\n if not self.__trusted and app_id != self.project_id:\n raise datastore_errors.BadRequestError(\n 'app %s cannot access app %s\\'s data' % (self.project_id, app_id))",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def get_attach_acr(self) -> Union[str, None]:\n # read the original value passed by the command\n attach_acr = self.raw_param.get(\"attach_acr\")\n\n # this parameter does not need dynamic completion\n # validation\n if self.decorator_mode == DecoratorMode.CREATE and attach_acr:\n if self._get_enable_managed_identity(enable_validation=False):\n # Attach acr operation will be handled after the cluster is created\n if self.get_no_wait():\n raise MutuallyExclusiveArgumentError(\n \"When --attach-acr and --enable-managed-identity are both specified, \"\n \"--no-wait is not allowed, please wait until the whole operation succeeds.\"\n )\n else:\n # newly added check, check whether client_id exists before creating role assignment\n service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)\n if not service_principal:\n raise RequiredArgumentMissingError(\n \"No service principal provided to create the acrpull role assignment for acr.\"\n )\n return attach_acr",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")"
] |
[
"0.75075585",
"0.621237",
"0.5496758",
"0.5382286",
"0.5201899",
"0.5133036",
"0.50387406",
"0.49052104",
"0.4841495",
"0.48401716",
"0.47986048",
"0.4797894",
"0.47620252",
"0.47620252",
"0.4748089",
"0.47434065",
"0.4737469",
"0.47354063",
"0.46972966",
"0.46837527",
"0.46837527",
"0.46794868",
"0.4678587",
"0.4678587",
"0.4678587",
"0.4678587",
"0.4678587",
"0.4678587",
"0.4678587",
"0.4678587"
] |
0.7235697
|
1
|
Internal function to obtain the value of aad_admin_group_object_ids. This function supports the option of enable_validation. When enabled in update mode, if aad_admin_group_object_ids is specified while aad_profile is not set or managed AAD is not enabled, an InvalidArgumentValueError is raised. This function normalizes the parameter by default, splitting the string into a list with "," as the delimiter.
|
def _get_aad_admin_group_object_ids(self, enable_validation: bool = False) -> Union[List[str], None]:
    # read the original value passed by the command
    aad_admin_group_object_ids = self.raw_param.get("aad_admin_group_object_ids")
    # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
    read_from_mc = False
    if self.decorator_mode == DecoratorMode.CREATE:
        if (
            self.mc and
            self.mc.aad_profile and
            self.mc.aad_profile.admin_group_object_i_ds is not None
        ):
            aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds
            read_from_mc = True

    # keep None as None, but empty string ("") to empty list ([])
    if not read_from_mc and aad_admin_group_object_ids is not None:
        aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []

    # validation
    if enable_validation:
        if aad_admin_group_object_ids:
            if self.decorator_mode == DecoratorMode.UPDATE:
                if not check_is_managed_aad_cluster(self.mc):
                    raise InvalidArgumentValueError(
                        'Cannot specify "--aad-admin-group-object-ids" if managed AAD is not enabled'
                    )

    return aad_admin_group_object_ids
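
The normalization step above is easy to get wrong, so here is the rule in isolation: None stays None (the parameter was not supplied at all), an empty string becomes an empty list, and anything else is split on ",". A small stand-alone sketch; the function name is illustrative.

from typing import List, Optional

def normalize_object_ids(raw: Optional[str]) -> Optional[List[str]]:
    # keep None as None (parameter not supplied)
    if raw is None:
        return None
    # empty string ("") becomes an empty list ([]); otherwise split on ","
    return raw.split(",") if raw else []

assert normalize_object_ids(None) is None
assert normalize_object_ids("") == []
assert normalize_object_ids("g1,g2") == ["g1", "g2"]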
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_aad_admin_group_object_ids(self) -> Union[List[str], None]:\n return self._get_aad_admin_group_object_ids(enable_validation=True)",
"def admin_group_object_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids",
"def get_group_admin_group_ids(self):\n return set(\n group.admin_group_id for group in self.find(type='U')\n )",
"def group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"group_ids\")",
"def update_admin_ids():\n admin_emails_config = Registry.get_config_property(\n 'admin_emails')\n if not admin_emails_config:\n return []\n\n admin_ids = []\n for email in admin_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n admin_ids.append(user_id)\n else:\n raise Exception('Bad admin email: %s' % email)\n return admin_ids",
"def admin_object_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"admin_object_ids\")",
"def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")",
"def get_award_ids (self, column):\n raw = self[column]\n vals = map (lambda x:x.strip(), raw.split(','))\n # return list (set (vals))\n return vals\n\n if 0:\n # truncated = filter (None, map (lambda x: len(x)>5 and x[-5:] or None, vals))\n truncated = filter (None, map (lambda x: len(x)>5 and normalize_id(x) or None, vals))\n\n # we only want the unique values (e.g. crossref lists dups sometimes)\n return list (set (truncated))",
"def security_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def admin_ids(self):\n # type: () -> List[int]\n return self._admin_ids",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids",
"def security_group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"security_group_ids\")",
"def update_moderator_ids():\n moderator_emails_config = Registry.get_config_property(\n 'moderator_emails')\n if not moderator_emails_config:\n return []\n\n moderator_ids = []\n for email in moderator_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n moderator_ids.append(user_id)\n else:\n raise Exception('Bad moderator email: %s' % email)\n return moderator_ids",
"def get_ids_strings(self, ids):\n #Split ids by list no longer than 1000 units,\n #because vk api can only gets 1000 ids per one call \n splitted_ids = list(self.chunks(ids, 1000))\n ids_in_list = []\n \n #crate list of strings with ids\n for split_ids in splitted_ids:\n user_ids = ''\n #make string ids list. Do it because of api requirement\n for id in split_ids:\n user_ids += str(id) + \",\"\n #remove last \",\"\n user_ids = user_ids[:-1]\n ids_in_list.append(user_ids)\n\n return ids_in_list",
"def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def get_admin_ids(bot, chat_id):\r\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]",
"def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm",
"def get_user_ida_groups():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n groups = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('idm_groups', None), False)\n\n return [group for group in groups if group.startswith('IDA')] if groups else not_found('groups')\n return None",
"def _get_api_server_authorized_ip_ranges(self, enable_validation: bool = False) -> List[str]:\n # read the original value passed by the command\n api_server_authorized_ip_ranges = self.raw_param.get(\n \"api_server_authorized_ip_ranges\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n read_from_mc = False\n if (\n self.mc and\n self.mc.api_server_access_profile and\n self.mc.api_server_access_profile.authorized_ip_ranges is not None\n ):\n api_server_authorized_ip_ranges = (\n self.mc.api_server_access_profile.authorized_ip_ranges\n )\n read_from_mc = True\n\n # normalize\n if not read_from_mc:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n elif self.decorator_mode == DecoratorMode.UPDATE:\n # normalize, keep None as None\n if api_server_authorized_ip_ranges is not None:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n if api_server_authorized_ip_ranges:\n if (\n safe_lower(self._get_load_balancer_sku(enable_validation=False)) ==\n CONST_LOAD_BALANCER_SKU_BASIC\n ):\n raise InvalidArgumentValueError(\n \"--api-server-authorized-ip-ranges can only be used with standard load balancer\"\n )\n if self._get_enable_private_cluster(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if api_server_authorized_ip_ranges:\n if check_is_private_cluster(self.mc):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n return api_server_authorized_ip_ranges",
"def get_targeted_ad_group_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n ad_group.id,\n ad_group_extension_setting.extension_feed_items\n FROM ad_group_extension_setting\n WHERE\n ad_group_extension_setting.extension_type = 'PROMOTION'\n AND ad_group.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n ad_group_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.ad_group_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching ad group with ID: '{row.ad_group.id}'\")\n ad_group_ids.append(row.ad_group.id)\n\n return ad_group_ids",
"def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]",
"def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]",
"def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]",
"def get_ids(self) -> List[str]:",
"def extract_entity_ids(hass, service_call, expand_group=True):\n if not (service_call.data and ATTR_ENTITY_ID in service_call.data):\n return []\n\n group = hass.components.group\n\n # Entity ID attr can be a list or a string\n service_ent_id = service_call.data[ATTR_ENTITY_ID]\n\n if expand_group:\n\n if isinstance(service_ent_id, str):\n return group.expand_entity_ids([service_ent_id])\n\n return [ent_id for ent_id in\n group.expand_entity_ids(service_ent_id)]\n\n else:\n\n if isinstance(service_ent_id, str):\n return [service_ent_id]\n\n return service_ent_id"
] |
[
"0.78241324",
"0.7128215",
"0.7058258",
"0.7058258",
"0.5821447",
"0.56956625",
"0.5670238",
"0.5612666",
"0.55328345",
"0.54451746",
"0.5062579",
"0.50551534",
"0.5042906",
"0.5023152",
"0.5023152",
"0.4999395",
"0.48978865",
"0.4784435",
"0.47647277",
"0.47604626",
"0.47378287",
"0.47258866",
"0.4706366",
"0.46901384",
"0.46705562",
"0.46702212",
"0.46702212",
"0.46702212",
"0.4655637",
"0.46398908"
] |
0.8040217
|
0
|
Obtain the value of aad_admin_group_object_ids. This function will verify the parameter by default. In update mode, if aad_admin_group_object_ids is specified while aad_profile is not set or managed AAD is not enabled, an InvalidArgumentValueError is raised. This function normalizes the parameter by default, splitting the string into a list with "," as the delimiter.
|
def get_aad_admin_group_object_ids(self) -> Union[List[str], None]:
    return self._get_aad_admin_group_object_ids(enable_validation=True)
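
Because this public getter validates by default, calling it in update mode for a cluster without managed AAD raises. A rough approximation of that guard as a free function; the names are hypothetical, and in the real code the cluster check is performed by check_is_managed_aad_cluster.

from typing import List, Optional

def validate_admin_group_ids(ids: Optional[List[str]], is_managed_aad: bool) -> Optional[List[str]]:
    # mirror of the update-mode guard: supplying IDs without managed AAD is an error
    if ids and not is_managed_aad:
        raise ValueError('Cannot specify "--aad-admin-group-object-ids" if managed AAD is not enabled')
    return ids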
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_aad_admin_group_object_ids(self, enable_validation: bool = False) -> Union[List[str], None]:\n # read the original value passed by the command\n aad_admin_group_object_ids = self.raw_param.get(\"aad_admin_group_object_ids\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n read_from_mc = False\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.admin_group_object_i_ds is not None\n ):\n aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds\n read_from_mc = True\n\n # keep None as None, but empty string (\"\") to empty list ([])\n if not read_from_mc and aad_admin_group_object_ids is not None:\n aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []\n\n # validation\n if enable_validation:\n if aad_admin_group_object_ids:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--aad-admin-group-object-ids\" if managed AAD is not enabled'\n )\n\n return aad_admin_group_object_ids",
"def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def admin_group_object_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"admin_group_object_ids\")",
"def group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"group_ids\")",
"def update_admin_ids():\n admin_emails_config = Registry.get_config_property(\n 'admin_emails')\n if not admin_emails_config:\n return []\n\n admin_ids = []\n for email in admin_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n admin_ids.append(user_id)\n else:\n raise Exception('Bad admin email: %s' % email)\n return admin_ids",
"def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids",
"def admin_object_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"admin_object_ids\")",
"def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")",
"def get_group_admin_group_ids(self):\n return set(\n group.admin_group_id for group in self.find(type='U')\n )",
"def get_award_ids (self, column):\n raw = self[column]\n vals = map (lambda x:x.strip(), raw.split(','))\n # return list (set (vals))\n return vals\n\n if 0:\n # truncated = filter (None, map (lambda x: len(x)>5 and x[-5:] or None, vals))\n truncated = filter (None, map (lambda x: len(x)>5 and normalize_id(x) or None, vals))\n\n # we only want the unique values (e.g. crossref lists dups sometimes)\n return list (set (truncated))",
"def admin_ids(self):\n # type: () -> List[int]\n return self._admin_ids",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids",
"def security_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"security_group_ids\")",
"def update_moderator_ids():\n moderator_emails_config = Registry.get_config_property(\n 'moderator_emails')\n if not moderator_emails_config:\n return []\n\n moderator_ids = []\n for email in moderator_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n moderator_ids.append(user_id)\n else:\n raise Exception('Bad moderator email: %s' % email)\n return moderator_ids",
"def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm",
"def get_user_ida_groups():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n groups = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('idm_groups', None), False)\n\n return [group for group in groups if group.startswith('IDA')] if groups else not_found('groups')\n return None",
"def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []",
"def get_targeted_ad_group_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n ad_group.id,\n ad_group_extension_setting.extension_feed_items\n FROM ad_group_extension_setting\n WHERE\n ad_group_extension_setting.extension_type = 'PROMOTION'\n AND ad_group.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n ad_group_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.ad_group_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching ad group with ID: '{row.ad_group.id}'\")\n ad_group_ids.append(row.ad_group.id)\n\n return ad_group_ids",
"def get_admin_ids(bot, chat_id):\r\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]",
"def get_ids_strings(self, ids):\n #Split ids by list no longer than 1000 units,\n #because vk api can only gets 1000 ids per one call \n splitted_ids = list(self.chunks(ids, 1000))\n ids_in_list = []\n \n #crate list of strings with ids\n for split_ids in splitted_ids:\n user_ids = ''\n #make string ids list. Do it because of api requirement\n for id in split_ids:\n user_ids += str(id) + \",\"\n #remove last \",\"\n user_ids = user_ids[:-1]\n ids_in_list.append(user_ids)\n\n return ids_in_list",
"def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args",
"def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()",
"def return_group_values(self, id_user:int) -> set:\n try:\n value_list_id = self.cursor.execute(f\"SELECT id_group FROM {table_users_groups} WHERE id_user={id_user};\").fetchall()\n if not value_list_id:\n return [], []\n value_list_id = ','.join([str(v[0]) for v in value_list_id])\n value_list_group = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id IN ({value_list_id});\").fetchall()\n return [v[0] for v in value_list_group], [v[1] for v in value_list_group]\n except Exception as e:\n msg = f\"We faced problems with getting values of the groups to the user; Mistake: {e}\"\n self.proceed_error(msg)\n return [], []",
"def get_all_group_ids(token) -> list:\n ids=list()\n _dict = perform_request(app_config.ENDPOINT, token)\n while True:\n for obj in _dict[\"value\"]:\n ids.append(obj[\"id\"])\n if \"@odata.nextLink\" not in _dict:\n return ids\n _dict = perform_request(_dict[\"@odata.nextLink\"], token)",
"def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]"
] |
[
"0.76494515",
"0.71606064",
"0.71606064",
"0.7147788",
"0.5648135",
"0.5629551",
"0.5549631",
"0.5545147",
"0.5501235",
"0.5494188",
"0.51931924",
"0.49949396",
"0.49620852",
"0.49620852",
"0.49533767",
"0.49023682",
"0.48139718",
"0.47528076",
"0.4749792",
"0.46772647",
"0.46582392",
"0.464114",
"0.46345636",
"0.46165183",
"0.46082547",
"0.45963007",
"0.4580388",
"0.4572819",
"0.45714644",
"0.4565512"
] |
0.7743399
|
0
|
Internal function to obtain the value of disable_rbac. This function supports the enable_validation option. When validation is enabled, a MutuallyExclusiveArgumentError is raised if disable_rbac and enable_azure_rbac are both True; likewise, a MutuallyExclusiveArgumentError is raised if enable_rbac and disable_rbac are both True.
|
def _get_disable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:
# read the original value passed by the command
disable_rbac = self.raw_param.get("disable_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.enable_rbac is not None
):
disable_rbac = not self.mc.enable_rbac
# this parameter does not need dynamic completion
# validation
if enable_validation:
if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--enable-azure-rbac cannot be used together with --disable-rbac"
)
if disable_rbac and self._get_enable_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError("specify either '--disable-rbac' or '--enable-rbac', not both.")
return disable_rbac
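
Below is a minimal, hypothetical sketch of the mutual-exclusion validation performed above; the stub exception class and the validate_rbac_flags helper are illustrative stand-ins for the Azure CLI types, not part of the actual source.

class MutuallyExclusiveArgumentError(Exception):
    """Stand-in for the Azure CLI error type used above."""

def validate_rbac_flags(raw_param: dict) -> None:
    # Mirror the two mutually exclusive checks from _get_disable_rbac.
    disable_rbac = raw_param.get("disable_rbac")
    if disable_rbac and raw_param.get("enable_azure_rbac"):
        raise MutuallyExclusiveArgumentError(
            "--enable-azure-rbac cannot be used together with --disable-rbac"
        )
    if disable_rbac and raw_param.get("enable_rbac"):
        raise MutuallyExclusiveArgumentError(
            "specify either '--disable-rbac' or '--enable-rbac', not both."
        )

# Passing both flags at once triggers the error, as in the validation above.
try:
    validate_rbac_flags({"disable_rbac": True, "enable_rbac": True})
except MutuallyExclusiveArgumentError as exc:
    print(exc)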
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_disable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_azure_rbac = self.raw_param.get(\"disable_azure_rbac\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_azure_rbac:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--disable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_enable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return disable_azure_rbac",
"def get_disable_rbac(self) -> Union[bool, None]:\n\n return self._get_disable_rbac(enable_validation=True)",
"def get_disable_azure_rbac(self) -> bool:\n return self._get_disable_azure_rbac(enable_validation=True)",
"def _get_enable_rbac(self, enable_validation: bool = False) -> Union[bool, None]:\n # read the original value passed by the command\n enable_rbac = self.raw_param.get(\"enable_rbac\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.enable_rbac is not None\n ):\n enable_rbac = self.mc.enable_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_rbac and self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\"specify either '--disable-rbac' or '--enable-rbac', not both.\")\n return enable_rbac",
"def _get_enable_azure_rbac(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_azure_rbac = self.raw_param.get(\"enable_azure_rbac\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.enable_azure_rbac is not None\n ):\n enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_azure_rbac:\n if self.decorator_mode == DecoratorMode.CREATE:\n if not self._get_enable_aad(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n if self._get_disable_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--enable-azure-rbac cannot be used together with --disable-rbac\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if not check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-azure-rbac\" if managed AAD is not enabled'\n )\n if self._get_disable_azure_rbac(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-azure-rbac\" and \"--disable-azure-rbac\" at the same time'\n )\n return enable_azure_rbac",
"def get_enable_rbac(self) -> Union[bool, None]:\n return self._get_enable_rbac(enable_validation=True)",
"def get_enable_azure_rbac(self) -> bool:\n\n return self._get_enable_azure_rbac(enable_validation=True)",
"def _get_disable_vpa(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_vpa = self.raw_param.get(\"disable_vpa\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_vpa and self._get_enable_vpa(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-vpa and --disable-vpa at the same time.\"\n )\n\n return disable_vpa",
"def _get_disable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_keyvault_kms = self.raw_param.get(\"disable_azure_keyvault_kms\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_azure_keyvault_kms and self._get_enable_azure_keyvault_kms(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-keyvault-kms and --disable-azure-keyvault-kms at the same time.\"\n )\n\n return disable_azure_keyvault_kms",
"def enable_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_rbac\")",
"def _get_disable_ahub(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_ahub = self.raw_param.get(\"disable_ahub\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if disable_ahub and self._get_enable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return disable_ahub",
"def _get_disable_azure_monitor_metrics(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_azure_monitor_metrics = self.raw_param.get(\"disable_azure_monitor_metrics\")\n if enable_validation:\n if disable_azure_monitor_metrics and self._get_enable_azure_monitor_metrics(False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-azure-monitor-metrics and --disable-azure-monitor-metrics at the same time\"\n )\n return disable_azure_monitor_metrics",
"def _get_enable_aad(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n enable_aad = self.raw_param.get(\"enable_aad\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.aad_profile and\n self.mc.aad_profile.managed is not None\n ):\n enable_aad = self.mc.aad_profile.managed\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n (\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ) = self._get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(\n enable_validation=False\n )\n if enable_aad:\n if any(\n [\n aad_client_app_id,\n aad_server_app_id,\n aad_server_app_secret,\n ]\n ):\n raise MutuallyExclusiveArgumentError(\n \"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or \"\n \"--aad-server-app-secret\"\n )\n if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--enable-azure-rbac can only be used together with --enable-aad\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if enable_aad:\n if check_is_managed_aad_cluster(self.mc):\n raise InvalidArgumentValueError(\n 'Cannot specify \"--enable-aad\" if managed AAD is already enabled'\n )\n return enable_aad",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_azure_rbac\")",
"def _get_disable_keda(self, enable_validation: bool = False) -> bool:\n # Read the original value passed by the command.\n disable_keda = self.raw_param.get(\"disable_keda\")\n\n # This option is not supported in create mode, hence we do not read the property value from the `mc` object.\n # This parameter does not need dynamic completion.\n if enable_validation:\n if disable_keda and self._get_enable_keda(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --enable-keda and --disable-keda at the same time.\"\n )\n\n return disable_keda",
"def enable_rbac(self) -> bool:\n return pulumi.get(self, \"enable_rbac\")",
"def get_disable_vpa(self) -> bool:\n return self._get_disable_vpa(enable_validation=True)",
"def _get_disable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n\n # read the original value passed by the command\n disable_secret_rotation = self.raw_param.get(\"disable_secret_rotation\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--disable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return disable_secret_rotation",
"def _get_disable_local_accounts(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n disable_local_accounts = self.raw_param.get(\"disable_local_accounts\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n hasattr(self.mc, \"disable_local_accounts\") and # backward compatibility\n self.mc.disable_local_accounts is not None\n ):\n disable_local_accounts = self.mc.disable_local_accounts\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if disable_local_accounts and self._get_enable_local_accounts(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"Cannot specify --disable-local-accounts and \"\n \"--enable-local-accounts at the same time.\"\n )\n return disable_local_accounts",
"def get_disable_ahub(self) -> bool:\n return self._get_disable_ahub(enable_validation=True)",
"def get_disable_azure_keyvault_kms(self) -> bool:\n return self._get_disable_azure_keyvault_kms(enable_validation=True)",
"def get_enable_aad(self) -> bool:\n\n return self._get_enable_aad(enable_validation=True)",
"def _get_enable_secret_rotation(self, enable_validation: bool = False) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n\n # read the original value passed by the command\n enable_secret_rotation = self.raw_param.get(\"enable_secret_rotation\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) is not None\n ):\n enable_secret_rotation = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_SECRET_ROTATION_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if enable_secret_rotation:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--enable-secret-rotation can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled. \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return enable_secret_rotation",
"def get_disable_secret_rotation(self) -> bool:\n return self._get_disable_secret_rotation(enable_validation=True)",
"def _get_rotation_poll_interval(self, enable_validation: bool = False) -> Union[str, None]:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = addon_consts.get(\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n # read the original value passed by the command\n rotation_poll_interval = self.raw_param.get(\"rotation_poll_interval\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL) is not None\n ):\n rotation_poll_interval = self.mc.addon_profiles.get(\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n ).config.get(CONST_ROTATION_POLL_INTERVAL)\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.UPDATE:\n if rotation_poll_interval:\n azure_keyvault_secrets_provider_enabled = (\n self.mc and\n self.mc.addon_profiles and\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME).enabled\n )\n if not azure_keyvault_secrets_provider_enabled:\n raise InvalidArgumentValueError(\n \"--rotation-poll-interval can only be specified \"\n \"when azure-keyvault-secrets-provider is enabled \"\n \"Please use command 'az aks enable-addons' to enable it.\"\n )\n return rotation_poll_interval",
"def _get_enable_ahub(\n self, enable_validation: bool = False\n ) -> bool:\n # read the original value passed by the command\n enable_ahub = self.raw_param.get(\"enable_ahub\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n if self.mc and self.mc.windows_profile:\n enable_ahub = self.mc.windows_profile.license_type == \"Windows_Server\"\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if enable_ahub and self._get_disable_ahub(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--enable-ahub\" and \"--disable-ahub\" at the same time'\n )\n return enable_ahub",
"def get_disable_azure_monitor_metrics(self) -> bool:\n return self._get_disable_azure_monitor_metrics(enable_validation=True)",
"def _get_no_uptime_sla(self, enable_validation: bool = False) -> bool:\n # read the original value passed by the command\n no_uptime_sla = self.raw_param.get(\"no_uptime_sla\")\n # We do not support this option in create mode, therefore we do not read the value from `mc`.\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if no_uptime_sla and self._get_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--uptime-sla\" and \"--no-uptime-sla\" at the same time.'\n )\n\n if no_uptime_sla and self.get_tier() == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD:\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--no-uptime-sla\" and \"--tier standard\" at the same time.'\n )\n\n return no_uptime_sla",
"def is_Disable_allowed(self):\n handler = self.get_command_object(\"Disable\")\n return handler.check_allowed()"
] |
[
"0.8219556",
"0.8052859",
"0.78519756",
"0.74519753",
"0.7272",
"0.68960583",
"0.6440081",
"0.61812997",
"0.61400986",
"0.61082125",
"0.6054763",
"0.5924363",
"0.5892928",
"0.5864097",
"0.5864097",
"0.580556",
"0.5716333",
"0.56800663",
"0.56755596",
"0.5580242",
"0.5285145",
"0.5274494",
"0.51434416",
"0.50556976",
"0.50075",
"0.50005203",
"0.4961427",
"0.48593563",
"0.4829881",
"0.48295778"
] |
0.8521468
|
0
|