query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Prints out homepage options to the user | def homepage(self):
    print('-=' * 12 + " Home Page " + '-=' * 12)
    self._user.list_contacts()
    options = {1: self.add_contact, 2: self.remove_contact, 3: self.view_contact_chat, 4: self.sign_out, 5: self.exit}
    print_out = "(1) Add new contact \n (2) Remove Contact \n (3) View my chats \n (4) Sign out \n (5) Exit"
    return self._take_option(options, print_out) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")",
"def main_menu(self):\n welcome = \"\"\"\n ************************\n * WELCOME TO CARSHARE! *\n ************************\n \"\"\"\n intro = \"Are you a USER or an ENGINEER?\"\n option1 = \"[1] USER\"\n option2 = \"[2] ENGINEER\"\n print(welcome, intro, option1, option2, sep='\\n')",
"def show_home(self):\n print(self.home.name)",
"def homepage():\r\n print(__name__ + \" invoked\")",
"def henhouseDisplayMenu () :\r\n print('1.Predict egg production')\r\n print('2.Display needs')\r\n print('0.Exit henhouse management')\r\n print()\r\n print('Please choose an option from the above menu')",
"def homepage():\n return {'sample': 'ADAL'}",
"def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? (ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? 
( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? (ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList",
"def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)",
"def display_menu():\n print(\"\"\"\\nChoose option:\n (1) List statistics\n (2) Display 3 cities with longest names\n (3) Display county's name with the largest number of communities\n (4) Display locations, that belong to more than one category\n (5) Advanced search\n (0) Exit program\"\"\")",
"def home_page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n )",
"def homepage():\r\n words = story.prompts\r\n # i didn't realize you could access class variables like this\r\n\r\n return render_template(\"homepage.html\", words = words)",
"def make_navbar_for_homepage(self):\n links = [\n \"home\", [\"Result Pages\", self._result_page_links()], \"Version\"\n ]\n if len(self.samples) > 1:\n links[1][1] += [\"Comparison\"]\n if self.publication:\n links.insert(2, \"Publication\")\n if self.gwdata is not None:\n links.append([\"Detchar\", [i for i in self.gwdata.keys()]])\n if self.notes is not None:\n links.append(\"Notes\")\n return links",
"def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()",
"def menu_spe_homepage(self, event=None):\n self.link('http://pythonide.stani.be')",
"def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))",
"def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')",
"def home(self):\n \n #Header:\n print(\"HOME\")\n print(\"-\" * 20)\n\n #Define the user:\n print(\"Who are you?\")\n print(\"1. Admin\\n2. Dealer\\n\")\n user = input(\"Please press 1, 2 or 3 to make your pick: \")\n return user",
"def print_menu():\r\n print(\"==============================================\")\r\n print(\"What do you want to do now? \")\r\n print(\"==============================================\")\r\n print(\"Available options:\")\r\n i = 1\r\n for a in available_actions:\r\n if current_state in a[\"valid_states\"]:\r\n # Only hint about the action if the current state allows it\r\n print(\" %i) %s\" % (i, a[\"description\"]))\r\n i += 1\r\n print()",
"def homepage():\n return (\n f\"Welcome to Hawaii - Climate Page<br/>\"\n f\"<br/>\"\n f\"This site has data from 01-01-2010 to 08-23-2017<br/>\"\n f\"<br/>\"\n f\"Available Pages:<br/>\"\n f\"<br/>\"\n f\"<br/>\"\n f\" Station Information<br/>\"\n f\" /api/v1.0/stations<br/>\"\n f\"<br/>\"\n f\" Percipitation Information<br/>\"\n f\" /api/v1.0/percipitation<br/>\"\n f\"<br/>\"\n f\" Temperature Observations<br/>\"\n f\" /api/v1.0/tobs<br/>\"\n f\"<br/>\"\n f\" Start Date information - complete url is '/api/v1.0//yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start<br/>\"\n f\"<br/>\"\n f\" Start and End Date information - complete url is '/api/v1.0/yyyy-mm-dd/yyyy-mm-dd'<br/>\"\n f\" /api/v1.0/start/end\"\n )",
"def menu_python_homepage(self, event=None):\n self.link('http://www.python.org')",
"def showPrefs(self):\n\t\tfor i in range(0,20):\n\t\t\tprint \"\"\n\t\t\n\t\tprint \"User Preferences\"\n\t\tprint \"================\"\n\t\tprint \"\"\n\t\t\n\t\tif self.newestOrTop == \"top\":\n\t\t\tprint \"Currently viewing top stories on HN.\"\n\t\telse:\n\t\t\tprint \"Currently viewing newest stories on HN.\"\n\t\tprint \"--------------------------------------------------------------------------------\"\n\t\t\t\n\t\tif self.hnUserName != \"\":\n\t\t\tprint \"HN username = \" + self.hnUserName + \". Karma = \" + str(self.karma)\n\t\t\tprint \"--------------------------------------------------------------------------------\"\n\t\t\t\n\t\tif self.showDomains:\n\t\t\tprint \"d -- show domains of stories.\"\n\t\telse:\n\t\t\tprint \"w -- show webpage URLs of stories.\"\n\t\tprint \"--------------------------------------------------------------------------------\"\n\t\t\n\t\tif self.showFullTitles:\n\t\t\tprint \"l -- always show full titles of stories.\"\n\t\telse:\n\t\t\tprint \"o -- truncate titles of stories to fit an 80-character terminal window.\"\n\t\tprint \"--------------------------------------------------------------------------------\"\n\t\t\n\t\tif self.collapseOldStories:\n\t\t\tprint \"c -- collapse stories after reading.\"\n\t\telse:\n\t\t\tprint \"e -- don't collapse stories after reading.\"\n\t\tprint \"--------------------------------------------------------------------------------\"\n\t\t\t\n\t\tprint \"\"\n\t\tinput = raw_input(\"Press Return to go back to the Hacker News stories.\")",
"def print_menu():\n print()\n print(\"Main Menu\")\n print(\"---------\")\n print(\"1 - Process a new data file\")\n print(\"2 - Choose units\")\n print(\"3 - Edit room filter\")\n print(\"4 - Show summary statistics\")\n print(\"5 - Show temperature by date and time\")\n print(\"6 - Show histogram of temperatures\")\n print(\"7 - Quit\")\n print()",
"def a_homepage():\n\n\treturn render_template('home/a_homepage.html', title=\"Homepage Admin\")",
"def help():\n print(UI.HELP)",
"def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )",
"def help_opt(self):\n print(OPTIONS)",
"def home_page():\n return \"<h4>Welcome !</h4><br><a href='/fetch'>View Results</a>\"",
"def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def _home(self, op, context):\n self.page = \"HOME\"\n return {'FINISHED'}",
"def _default(self):\n self.app.args.print_help()"
]
| [
"0.7042643",
"0.67346436",
"0.66125506",
"0.64347136",
"0.6407639",
"0.64059675",
"0.64050734",
"0.6386845",
"0.63713026",
"0.6307135",
"0.62678397",
"0.6239708",
"0.6216758",
"0.618103",
"0.61778545",
"0.6174112",
"0.61685836",
"0.6142617",
"0.6133943",
"0.61180687",
"0.6090335",
"0.6078417",
"0.6071326",
"0.6065741",
"0.60549796",
"0.60501385",
"0.6045542",
"0.6044291",
"0.6042806",
"0.6039185"
]
| 0.71009314 | 0 |
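The `homepage` document above uses a dispatch-table menu: option numbers map to bound methods, and an internal `_take_option` helper (not shown in this row) reads the choice and invokes the matching callable. A minimal, self-contained sketch of that pattern, with a hypothetical standalone `take_option` in place of the class helper:

```python
def take_option(options, print_out):
    """Read an integer menu choice and dispatch to the matching callable.

    `options` maps menu numbers to zero-argument callables; invalid or
    out-of-range input re-prompts instead of raising.
    """
    while True:
        raw = input(print_out + "\nYour choice: ")
        try:
            choice = int(raw)
        except ValueError:
            print("Please enter a number.")
            continue
        if choice in options:
            return options[choice]()  # call the selected handler
        print("Choice out of range, try again.")


# Usage mirroring the row above (handler names are placeholders):
# options = {1: add_contact, 2: remove_contact, 3: view_chats, 4: sign_out, 5: quit_app}
# take_option(options, "(1) Add new contact \n ... \n (5) Exit")
```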
Add a contact to a user account by contact's username | def add_contact(self):
    contact_mob_num = self._input_mob_num("-=" * 30 + "\n" + "Please enter contact's mobile number to be added: ")
    if contact_mob_num == self._user.mob_num:
        print("You can't add yourself, IDIOT!!")
        return self.homepage()
    found_contact = self.auth.get_users_by_MobNum(contact_mob_num)
    if found_contact is not None:
        print('A user with Mobile number: "{0}", and User name: "{1}" is found'.format(found_contact.mob_num, found_contact.username))
        user_choice = self._int_input_in_range(" (1) Add the found user. \n (0) Back to Home page \n Your choice: ", range_=(0, 1))
        if user_choice:
            add_flag = self._user.add_contact(found_contact)
            if not add_flag:
                print('This user is already one of your contacts')
                return self.homepage()
            print("Contact added successfully")
        else:
            return self.homepage()
    else:
        print('This user mobile number has no matches')
    return self.homepage() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_adduser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tself.cl.add_contact()\n\t\telse:\n\t\t\tprint(\"To add contacts you need to open or create a book.\")",
"def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))",
"def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))",
"def addcontact(name, address=None, phone=None, email=None):\n try:\n newid = str(r.incr(\"global:nextUserId\"))\n _setcontact(newid, name, address, phone, email)\n r.sadd(\"contacts\", newid)\n\n return _getcontact(newid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")",
"def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()",
"def add_contact_to_google_account(self, i):\n\n self.add_contact_to_phone(i)",
"def add_contact_to_db_by_one(name, email, module_db_id, contact_id):\n success = False\n if name is not None:\n try:\n done_email = email.lower().strip()\n validate_email(done_email)\n\n if contact_id:\n try:\n contact = Contact.objects.get(id=contact_id, list_owner_id=module_db_id)\n contact.name_and_last_name = name\n contact.email = email\n contact.status = 1\n contact.save()\n success = True\n except Contact.DoesNotExist:\n pass\n else:\n contact, created = Contact.objects.get_or_create(list_owner_id=module_db_id, email=email)\n if created and contact:\n contact.name_and_last_name = name\n contact.status = 1\n contact.save()\n success = True\n except Exception as e:\n print(e.args)\n\n return success, name, email",
"def add_contact():\n return 'add contact'",
"def addUser(self, accountId, username, accesstype, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method addUser\")",
"def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)",
"def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass",
"def on_contact(self, update, context):\n user = update.effective_user\n chat_id = update.effective_chat.id\n phone = update.message.contact.phone_number\n log.info(\n \"TEL from %s, %s, @%s, %s\", user.username, user.full_name, chat_id, phone,\n )\n\n # Here's an example of what else you can find in update['message'].contact.to_dict()\n # {'phone_number': '+4500072470000', 'first_name': 'Alex', 'user_id': 253150000}\n # And some user-related details in update.effective_user.to_dict()\n # {'first_name': 'Alex', 'id': 253150000, 'is_bot': False, 'language_code': 'en', 'username': 'ralienpp'}\n\n # Tell the backend about it, such that from now on it knows which chat_id corresponds to this user\n known_user = self.backend.link_chatid_to_volunteer(\n user.username, update.effective_chat.id, phone\n )\n\n if known_user:\n # Mark the user as available once onboarding is complete\n context.user_data[\"state\"] = c.State.AVAILABLE\n # Acknowledge receipt and tell the user that we'll contact them when new requests arrive\n update.message.reply_text(c.MSG_STANDBY)\n return\n\n # If we got this far, this is a completely new person who initiated the registration process via the bot, it is\n # time to ask them a few things and build a profile\n self.build_profile(update, context, phone=phone)",
"def do_add10000users(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.add_10000contacts(str(line))\n\t\t\texcept FileNotFoundError:\n\t\t\t\tprint(\"There is no such file!\")",
"def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")",
"def add_user(self, name, data=None):\n if data is None:\n data = {}\n self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))] = data",
"def addAccountContact(self,contact, accountId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/contacts?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(contact).execute();\r\n\t\treturn self.client.result();",
"def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']",
"def add_contact(database, name: str, email: str, phone: int) -> None:\n # Searches the database for the the current contact (excel row)\n cursor = database.execute(\"SELECT DISTINCT name, email, phone FROM contacts \"\n \"WHERE name = ? AND email =? OR phone = ?\", (name, email, phone))\n # Assigns the cursor results to the 'row' variable\n row = cursor.fetchone()\n # print(row) # For debugging\n\n # This checks if the contact already exists in the database or not\n if row:\n print(\"\\n{}, {}, {} is already in the database.\".format(name, email, phone))\n # Add the contact to the 'duplicates' table to retain the info in case of any\n # discrepancies in the final database.\n database.execute(\"INSERT INTO duplicates VALUES (?, ?, ?)\", (name, email, phone))\n else:\n cursor.execute(\"INSERT INTO contacts VALUES (?, ?, ?)\", (name, email, phone)) # Add contact to db\n cursor.connection.commit()\n # print(\"{}, {}, {} added to database.\".format(name, email, phone)) # For debugging",
"def add_contact(cmd, *args):\n cfg = get_config()\n nick = None\n if len(args) == 0:\n print(add_contact.__doc__)\n if len(args) >= 1:\n nick = args[0]\n fulname = nick # fullname fallback\n if len(args) >= 2:\n fullname = args[1]\n #print('fullname %s' %fullname)\n else:\n print(\"cant handle those params \" + str(args))\n\n vcard_fn = nick + '.vcf'\n vcard_fn = os.path.join(cfg['vcard_dir'], vcard_fn)\n #print('expecting file at %s' %vcard_fn)\n\n info = {}\n info['nick'] = nick\n info['fullname'] = fullname\n if len(fullname.split(' ')) > 1:\n subname = fullname.split()\n info['name'] = {'family': subname[0], 'given': subname[1]}\n if os.path.isfile(vcard_fn):\n print('file exists for %s, at %s please move or rename it'\n % (nick, vcard_fn))\n return False\n vcard = vobject.vCard()\n if os.path.isfile(vcard_fn):\n vcard = loadcraphere\n else:\n vcard_merge_in_dict(info, vcard)\n rawdata = vcard.serialize()\n with open(vcard_fn, 'w+') as fh:\n fh.write(rawdata)\n #print('written, sucker!')\n #annoyingly verbose vcard here'\n #Full Name = fn. Single string, entire name, required\n #x = vobject.vCard()\n # x.name = 'Foo'",
"def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)",
"def add_user(name: str, last_name: str, username: str) -> None:\n with connection:\n connection.execute(ADD_USER, (name, last_name, username))",
"def add_user(self, user):\n\t\tself.users[user.username] = user",
"def add_contact_to_db(self):\n self.init_db(self._testing)\n\n # make sure that the object is not in the db\n assert self.uid == \"\"\n\n self._insert_row_into_db(Contact.table_name, Contact.columns, self.values)\n\n # update this objects uid\n self.uid = self._get_id_of_last_row(Contact.table_name)",
"def register(self, name, contact):\n return Registration(self.request).add(name, contact)",
"async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],\n phone: Optional[str], active: bool) -> str:\n q = \"\"\"insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)\"\"\"\n q_args = (name, email, phone, active)\n contact_id = await dbcon.operation(q, q_args)\n return contact_id",
"def findAndAddContactByUserid(self, userid):\n try:\n contact = self._findAndAddContactsByUserid(userid)\n except TalkException as e:\n self.raise_error(e.reason)\n\n contact = contact.values()[0]\n\n for c in self.contacts:\n if c.id == contact.mid:\n self.raise_error(\"%s already exists\" % contact.displayName)\n return\n\n c = LineContact(self, contact)\n self.contacts.append(c)\n\n self.contacts.sort()\n return c",
"def invite(self, greeting=None):\n if not greeting:\n greeting = \"Hi, {0}, I'd like to add you as a contact.\".format(self.name)\n prefix = \"28\" if isinstance(self, SkypeBotUser) else \"8\"\n self.skype.conn(\"POST\", \"{0}/users/{1}/contacts\".format(SkypeConnection.API_CONTACTS, self.skype.userId),\n auth=SkypeConnection.Auth.SkypeToken, json={\"mri\": \"{0}:{1}\".format(prefix, self.id),\n \"greeting\": greeting})",
"def edit_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))",
"def add_user(self, email, last_name, first_name, phone_numbers):\n return run_transaction(\n self.sessionfactory,\n lambda session: add_user_txn(session, email, last_name,\n first_name, phone_numbers))"
]
| [
"0.70311785",
"0.688382",
"0.65328264",
"0.6457848",
"0.6391224",
"0.63722444",
"0.63144225",
"0.6200419",
"0.6163992",
"0.6162148",
"0.6123405",
"0.6102063",
"0.60712075",
"0.6050677",
"0.6011818",
"0.6008362",
"0.59680736",
"0.5960808",
"0.591985",
"0.5908906",
"0.5906909",
"0.5884452",
"0.58690345",
"0.5847433",
"0.5812874",
"0.57562625",
"0.57444036",
"0.5728375",
"0.5720096",
"0.57163185"
]
| 0.70890373 | 0 |
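`add_contact` above depends on two input helpers the row never defines: `_input_mob_num` and `_int_input_in_range`. A plausible sketch of the range-checked reader follows; the function name, the inclusive-bounds convention, and the re-prompt-on-bad-input behavior are assumptions inferred from the call sites, not code from the source.

```python
def int_input_in_range(prompt, range_=(0, 1)):
    """Prompt until the user enters an integer within the inclusive range.

    At the call sites above, 0 conventionally means "back to home page",
    so callers can treat the falsy return value as a cancel signal.
    """
    low, high = range_
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError:
            print("Numbers only, please.")
            continue
        if low <= value <= high:
            return value
        print("Enter a number between {} and {}.".format(low, high))
```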
Print all chats' usernames, then view one contact's chat | def view_contact_chat(self):
    if self._user.chats == {}:
        print("No chats to be viewed yet")
        return self.homepage()
    print('-=' * 30)
    chats = self._user.list_chats()
    user_choice = self._int_input_in_range("Pick whose contact chat to be viewed: ", range_=(1, len(chats)))
    if not user_choice:
        return self.homepage()
    chat, contact = chats[user_choice - 1]
    chat_content = chat.get_content(self._user)
    print('-=' * 12 + " Chat Window " + '-=' * 12)
    if chat_content != []:
        for line in chat_content:
            print(line.rstrip())
    else:
        print('This chat is empty, send your first msg now')
    user_choice = self._int_input_in_range(' (1) Send new msg \n (2) Back to homepage \n Your choice: ', range_=(1, 2))
    if user_choice == 1:
        print('HINT: send (0) to exit the chat window')
        return self._send_msg(contact)
    else:
        return self.homepage() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_messages(stdscr):\n # Show the cursor and echo output.\n curses.curs_set(1)\n curses.echo()\n conversations = filesystem.conversations()\n stdscr.clear()\n row = 1\n column = 1\n for name in conversations:\n safe_put(stdscr, name, (row, column))\n row += 1\n safe_put(stdscr, \"Start typing a name: \", (row+1, column))\n stdscr.refresh()\n selection = \"\"\n possibilities = conversations\n while len(possibilities) > 1:\n selection += chr(stdscr.getch())\n if selection.endswith(\"\\n\") and selection[:-1] in possibilities:\n # Hit enter to confirm the choice of a username when it's a\n # substring of another username.\n possibilities = [selection[:-1]]\n break\n possibilities = [p for p in possibilities if p.startswith(selection)]\n curses.curs_set(0)\n curses.noecho()\n stdscr.clear()\n stdscr.refresh()\n if possibilities:\n read_message(stdscr, possibilities[0])\n else:\n print(\"No user matched '{selection}'\".format(selection=selection))",
"def printusers(self, irc, msg, args):\n irc.reply(self.user_report(), prefixNick=False)",
"def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)",
"def chat():\n kwargs = {\"title\": u\"chat channel\", \"entries\": log.getLogEntries()}\n return render_template(\"chat.html\", **kwargs)",
"def users(bot, event, *args):\n\n username_lower = ' '.join(args).strip().lower()\n\n html = '<b>Results for user named \"{}\":</b><br />'.format(' '.join(args)) if username_lower else '<b>Users in chat (total {}):</b><br />'.format(len(event.conv.users))\n\n for u in sorted(event.conv.users, key=lambda x: x.full_name.split()[-1]):\n if username_lower in u.full_name.lower():\n link = 'https://plus.google.com/u/0/{}/about'.format(u.id_.chat_id)\n html += '<a href=\"{}\">{}</a>'.format(link, u.full_name)\n if u.emails:\n html += ' (<a href=\"mailto:{}\">{}</a>)'.format(u.emails[0], u.emails[0])\n html += '<br /><i>{}</i><br />'.format(u.id_.chat_id)\n\n bot.send_message_parsed(event.conv, html)",
"def index(request):\n\n chats = Chat.objects.all().order_by('-created_at')\n\n if request.user.is_authenticated():\n chats = chats.filter(friend_groups__in=request.user.get_profile().\\\n friend_groups.all().values_list('id'))\n else:\n chats = chats.filter(friend_groups__isnull=True)\n\n return render_to_response('index.html', {\n 'chats': chats[:10],\n }, context_instance=RequestContext(request))",
"def show_users():\n return 'hehe'",
"def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names",
"def get_users(msg: telebot.types.Message):\n users = User.select()\n m = ''\n for user in users:\n menu_caption = \"In PVP game\" if user.state == states.USER_IN_PVP_GAME else \"In AI game\" if user.state == states.USER_IN_AI_GAME else \"In menu\"\n m += f'[{user.first_name}](tg://user?id={user.user_id}) - {menu_caption}\\n'\n\n bot.send_message(\n msg.from_user.id,\n m,\n parse_mode='Markdown'\n )",
"async def list(self, ctx):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n message = []\n message.append(\"```\\n\")\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n if len(self.twitch_streams) > 0:\n for stream in self.twitch_streams:\n message.append(stream[\"NAME\"] + \"\\n\")\n else:\n message.append(\"No streams found!\")\n message.append(\"```\")\n output = ''.join(message)\n await self.bot.say(output)\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")",
"def view_users(stdscr):\n stdscr.clear()\n safe_put(stdscr, \"* marks a user online at last update. Hit any key to return to menu.\", (2, 1))\n row = 4\n for user in taunet.users.all():\n if user.is_on:\n safe_put(stdscr, \"*\", (row, 1))\n safe_put(stdscr, user.name, (row, 3))\n row += 1\n stdscr.refresh()\n\n # Wait for any key, then clear and return to menu.\n stdscr.getch()\n stdscr.clear()\n stdscr.refresh()",
"def chat(tcp, udp, userId, data, friends, chats):\n\n # confirm user is a friend\n if data[0] in friends:\n\n # form server, get updated address for friend\n tcp.sendMessage('SEARCH ' + data[0])\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n\n # confirm user is on line\n if address:\n\n # update chat counter\n if data[0] in chats:\n chats[data[0]] = chats[data[0]] + 1\n else: chats[data[0]] = 1\n\n # get number of chats\n num = str(chats[data[0]])\n\n # send message to friend\n udp.sendto('CHAT ' + userId + ' ' + num + ' ' + data[1], address)\n print 'Sent message to ' + data[0]\n\n # the user is off line\n else: print 'Could not send chat to ' + data[0]\n\n # the user is not a friend\n else: print 'The following user is not a friend: ' + data[0]",
"def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))",
"def print_users(self):\n for i, item in enumerate(self.users):\n print(\"{}. {}\".format(i, item.name))",
"def conversation(self, line, teamchat):\n # print(type(line))\n if (line.split(\" \")[0] == \"[chat]:\" or line.split(\" \")[0] == \"[teamchat]:\") and line.split(\" \")[1] != \"***\":\n if teamchat:\n result = re.search(\"\\[teamchat\\]: (\\d+):(.+):(.+): (.+)\", line)\n else:\n result = re.search(\"\\[chat\\]: (\\d+):(.+):(.+): (.+)\", line)\n name = result.groups()[2]\n ide = result.groups()[0]\n message = result.groups()[-1]\n team_chat = result.groups()[1]\n info = [name, message, ide, team_chat]\n return info\n #[chat]: 0:-2:LeveL 5: mo\n else:\n info = [\"NONE\", \"NONE\", \"NONE\"]\n return info",
"def chat(self):\n return self._get(\"chat\")",
"def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))",
"def view_all_persons():\n message = ''\n global conn\n with conn:\n rows = select_all_persons(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Person Table', message)",
"def display_messages(self):\n\n\t\twhile self.joined:\n\t\t\tif len(self.messages) != 0:\n\t\t\t\tfor msg in self.messages:\n\t\t\t\t\t#: If the message is empty, ignore it.\n\t\t\t\t\tif msg == \"\":\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t#: If the message is close\", then the server has told the client\n\t\t\t\t\t#: to shut down, so it will. This is not an issue, as users\n\t\t\t\t\t#: messages will always have an identifier and : before their\n\t\t\t\t\t#: message, thus,the only messages that don't include an\n\t\t\t\t\t#: identifier will be from the server itself.\n\t\t\t\t\telif msg[:5] == \"close\":\n\n\t\t\t\t\t\treason = msg[6:]\n\n\t\t\t\t\t\tprint(\"This client was closed due to {}.\".format(reason))\n\t\t\t\t\t\tself.quit(True)\n\n\t\t\t\t\t#: Otherwise, print the message to the commandline.\n\t\t\t\t\telif not self.silent:\n\t\t\t\t\t\tprint('\\r' + msg, end='')\n\n\t\t\t\t\t\tprint(\"\\nYou: \", end='')\n\t\t\t\t\t\tself.displayed_you = True\n\n\t\t\t\t\t#: Remove the processed message\n\t\t\t\t\tself.messages.remove(msg)",
"def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)",
"def joingroup_command(update,context):\n update.message.reply_text('Want to chat with other CTF players or ask questions to admins? Use the following channel:\\r\\nhttps://t.me/joinchat/CYsj-xwzlFqIbQPPeo04bw')",
"def names_interaction():\n already_printed = []\n for protocol in protocols:\n for account in protocol.accounts:\n for contact in account.contacts:\n for message in contact.messages:\n if message.name not in already_printed:\n already_printed.append(message.name)\n print(message.name)\n nicks = input(\"Own nicks, comma separated: \")\n nicks = nicks.split(\",\")\n nicks = [nick.strip() for nick in nicks]\n return nicks",
"def chatlist(request):\n\n chats = get_chat_list()\n chat_list = pagination(request, chats, CHATS_PER_PAGE)\n\n dic = {'chatlist': chat_list}\n return render_to_response('whatsapp/chatlist.html', dic, context_instance=RequestContext(request))",
"async def info(self, ctx, user : str=None):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n message = []\n message.append(\"```\\n\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n if user:\n for stream in self.twitch_streams:\n if stream[\"NAME\"] == user:\n message.append(\"Stream name: \" + str(stream[\"NAME\"]) + \"\\n\")\n\n if stream[\"IMAGE\"]:\n message.append(\"Image URL: \" + str(stream[\"IMAGE\"]) + \"\\n\")\n else:\n message.append(\"Image URL: N/A\\n\")\n\n if stream[\"LOGO\"]:\n message.append(\"Logo URL: \" + str(stream[\"LOGO\"] + \"\\n\"))\n else:\n message.append(\"Logo URL: N/A\\n\")\n\n if stream[\"CHANNEL\"]:\n message.append(\"Assigned channel ID: \" + str(stream[\"CHANNEL\"]) + \"\\n\")\n else:\n message.append(\"Assigned channel ID: N/A\\n\")\n\n if stream[\"STATUS\"]:\n message.append(\"Status: \" + str(stream[\"STATUS\"]) + \"\\n\")\n else:\n message.append(\"Status: N/A\\n\")\n\n if stream[\"ALREADY_ONLINE\"]:\n message.append(\"ALREADY_ONLINE: \" + str(stream[\"ALREADY_ONLINE\"]) + \"\\n\")\n else:\n message.append(\"ALREADY_ONLINE: N/A\\n\")\n\n if stream[\"GAME\"]:\n message.append(\"Game: \" + str(stream[\"GAME\"]) + \"\\n\")\n else:\n message.append(\"Game: N/A\\n\")\n\n if stream[\"VIEWERS\"]:\n message.append(\"Viewers: \" + str(stream[\"VIEWERS\"]) + \"\\n\")\n else:\n message.append(\"Viewers: N/A\\n\")\n\n if stream[\"LANGUAGE\"]:\n message.append(\"Language: \" + str(stream[\"LANGUAGE\"]) + \"\\n\")\n else:\n message.append(\"Language: N/A\\n\")\n\n if stream[\"MESSAGE\"]:\n message.append(\"Message ID: \" + str(stream[\"MESSAGE\"]) + \"\\n\")\n else:\n message.append(\"Message ID: N/A\\n\")\n\n message.append(\"```\\n\")\n output = ''.join(message)\n await self.bot.say(output)\n\n else:\n await self.bot.say(\"Please provide a user!\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")",
"def _do_mein_spruch(self, chat_id, user_id, args, update):\n spruch = self.db.get_active_spruch(user_id)\n user_name = update[\"message\"][\"from\"][\"first_name\"]\n \n if not spruch:\n self.tclient.send_message('Ich habe noch keinen Nasenspruch von dir gespeichert, {}.'.format(user_name), chat_id)\n else:\n self.tclient.send_message('{}: <i>{}</i>'.format(user_name, spruch.text), chat_id)",
"def chat():\n username = request.cookies.get('username')\n\n if username != None and username != \"\":\n return r.renderContent('chat.html', name=username)\n return redirect('/login')",
"def userChat(userKeys, contactKey, contactName, chatting):\n print(\"\\n============== CHATTING WITH\", contactName, \"==============\\n\")\n\n def sendToClient(plaintext):\n \"\"\"Function to send message with signature to another client through server\"\"\"\n signature = userKeys.signUsingPrivateKey(plaintext)\n encryptedText = userKeys.encrypt(plaintext, contactKey)\n s.send(encryptedText)\n time.sleep(1)\n s.send(signature)\n\n # Loop infinitely for chatting\n while True:\n if chatting:\n textMessage = input(\">> \")\n sendToServer(b\"SendingMessage|\" + contactName.encode())\n time.sleep(0.1)\n sendToClient(textMessage.encode())\n else:\n txt = s.recv(1024)\n txtSig = s.recv(1024)\n txtDigest = userKeys.decryptUsingPrivateKey(txt)\n if userKeys.verifyUsingPublicKey(txtSig, txtDigest, contactKey):\n print(\"[\", contactName, \"] : \", txtDigest.decode())\n else:\n print(\"[\", contactName, \"] : BLOCKED MESSAGE\")\n print(\"MESSAGE MAY HAVE BEEN ALTERED IN TRANSIT, CANNOT BE TRUSTED.\")\n chatting = not chatting\n return",
"def help(chat_lines):\n for line in xrange(9):\n chat_lines[line].setTextColor(\"Blue\")\n chat_lines[0].setText(\"------------------------------------------Welcome to the online chat - Amdur's Chat!--------\"\n \"----------------------------------\")\n chat_lines[1].setText(\"You can use the following commands:\")\n chat_lines[2].setText(\"# 'Kick (Member)' - to kick a member (Only for Managers)\")\n chat_lines[3].setText(\"# 'Mute/UnMute (Member)' - to mute or cancel a mute of member (Only for Managers)\")\n chat_lines[4].setText(\"# 'Private (Member):' - to send message to a specific member \")\n chat_lines[5].setText(\"# 'Add (Member)' - to become a member to manager (Only for Managers)\")\n chat_lines[6].setText(\"# 'Manager list' - for getting the manager list\")\n chat_lines[7].setText(\"# 'Quit' - to leave chat \")\n chat_lines[8].setText(\"--------------------------------------------------------------------------------------------\"\n \"----------------------------------------------------\")",
"def search_public_chat(self, tg_username):\n data = {\n 'username': tg_username\n }\n return self._send_data('searchPublicChat', data)",
"def update_chats(self):\n res = requests.get(self.info_url)\n messages = res.json()['result']\n chats = set(m['message']['chat']['id'] for m in messages)\n for c in chats:\n if c not in self.chats:\n self.save_chat(c)\n return [c for c in chats]"
]
| [
"0.6596113",
"0.65658224",
"0.64484376",
"0.6312091",
"0.6303174",
"0.61384296",
"0.6124483",
"0.6101503",
"0.60731196",
"0.60624266",
"0.60381544",
"0.59948516",
"0.59897465",
"0.59654415",
"0.5922948",
"0.5921107",
"0.5865494",
"0.58365196",
"0.58352834",
"0.5827944",
"0.5811698",
"0.58016855",
"0.5798848",
"0.5780432",
"0.57716894",
"0.5770949",
"0.5766009",
"0.5757539",
"0.563777",
"0.56162477"
]
| 0.7599768 | 0 |
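`view_contact_chat` above assumes `self._user.list_chats()` both prints an enumerated contact menu and returns `(chat, contact)` pairs so the 1-based choice can index into them. A hedged sketch of such a helper, written as a standalone function over an assumed contact-to-chat mapping:

```python
def list_chats(chats):
    """Print contacts with 1-based indices and return (chat, contact) pairs.

    `chats` is assumed to map contact objects (each with a .username)
    to chat objects, mirroring how the row above unpacks the pairs.
    """
    pairs = [(chat, contact) for contact, chat in chats.items()]
    for i, (_, contact) in enumerate(pairs, start=1):
        print(" ({}) {}".format(i, contact.username))
    return pairs
```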
Uses the User's send_msg method to send a msg to a certain contact | def _send_msg(self, contact):
    msg_content = input('{} :'.format(self._user.username))
    if msg_content == '0':
        return self.homepage()
    self._user.send_msg(contact, msg_content)
    return self._send_msg(contact) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()",
"def send(self, msg):\n self.message('Me', msg)",
"def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1",
"def send_message(contact, message):\n try:\n print('5 seconds to navigate to slack app..')\n time.sleep(5)\n\n # Use JumpTo slack feature\n pyautogui.hotkey('command', 'k')\n time.sleep(1)\n # Enter contact name in search box, click enter\n pyautogui.typewrite(contact)\n time.sleep(1)\n pyautogui.typewrite(['enter'])\n time.sleep(1)\n\n active = pyautogui.locateOnScreen('active_identifier.png')\n \n if not active:\n print(f'{contact} is not active, skipped contact')\n return\n \n print('Contact is active, sending message...')\n pyautogui.typewrite(['tab']) \n pyautogui.typewrite(message)\n pyautogui.typewrite(['enter'])\n\n except KeyboardInterrupt:\n print('Process was cancelled..')",
"def send_msg(self,msg,to=None,flag=\"CatMsg\"):\n\n if not to:\n to = self.__uri\n #Fetion now can use mobile number(09.02.23)\n #like tel: 13888888888\n #but not in sending to PC\n elif flag != \"CatMsg\" and to.startswith(\"tel:\"):\n pass\n\n elif flag == \"CatMsg\" and to.startswith(\"tel:\"):\n return False\n elif flag != \"CatMsg\" and len(to) == 11 and to.isdigit():\n to = \"tel:\"+to\n\n else:\n to = self.get_uri(to)\n if not to:\n return False\n self.get(flag,to,msg)\n try:\n response = self.send()\n except PyFetionSocketError,e:\n log(locals())\n return False\n\n code = self.get_code(response)\n if code == 280:\n log(\"Send sms OK!\")\n elif code == 200:\n log(\"Send msg OK!\")\n else:\n log(locals())\n return False\n return True",
"async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)",
"def send_message(self,contato,mensagem):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told \r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n type_field = self.driver.find_element_by_xpath(self.TYPE_MSG)\r\n type_field.click()\r\n type_field.send_keys(mensagem)\r\n send_msg= self.driver.find_element_by_xpath(self.SEND_BUTTON)\r\n send_msg.click()\r\n sleep(1)",
"def send_message(self, to, subject, body):\n self.forum.send_message(self.game, Message(to=to, subject=subject, body=body))",
"def send(self, msg):\n pass",
"def send(self, msg):\n pass",
"def send(self, msg):\n pass",
"def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)",
"def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)",
"def send(self, from_phone, to_phone, msg):\r\n message = self.client.messages \\\r\n .create(\r\n body=msg,\r\n from_=from_phone,\r\n to=to_phone\r\n )\r\n print(message.status)",
"def flash_msg(self, params):\n if params.has_key('receiver'): name = params['receiver']\n else: \n if self.participant: \n group = self.service.groupOfParticipant(self.participant)\n if group: \n member_avail = filter(lambda x:x.status == LISTEN and x.name != self.name,group.members)\n if member_avail:\n member = member_avail.pop()\n name = member.name\n else:\n self.notLoggedIn()\n return\n if params.has_key('text'): text = params['text']\n else: return\n\n logger.writeLog(\"%s@%s said:'%s'\" % (self.name,self.transport.hostname,text))\n \n if self.participant:\n msgMethod = self.participant.directMessage\n try:\n self.service.sendParticipants(self.name,\"botmsg\",{\"text\":text,\"sender\":self.name})\n msgMethod(name,text)\n except:\n self.receiveDirectCommand(\"msg\",{\"sender\":\"MsgServ\",\"text\":\"cant send text, probably there is no user to listen\"})\n else:\n self.notLoggedIn()",
"def sendChatMessage(self, msg):\n self.transport.write(msg)",
"def SendMessage(service, user_id, message):\n\n message_resp = (service.users().messages().send(userId=user_id, body=message).execute())\n print(\"Sucessfull!!! \", message_resp)",
"def proceed_chatting_message(msg: telebot.types.Message):\n _, user, receiver = utils.get_game_user_opponent(msg.from_user)\n bot.send_message(\n receiver.user_id,\n f'**{user.first_name}:** __{msg.text}__',\n parse_mode='Markdown',\n )",
"def send_to(self, target, msg):\n\t\tif self.cid is None:\n\t\t\traise UsageError(\"Not in a group!\")\n\t\tidb, payload = msg[0], msg[1:]\n\t\tself.sendMessage(idb + chr(target) + payload, True)",
"async def contact(self, ctx, *, message : str):\n # [p]contact\n\n if not User.objects.get(is_owner=True).exists():\n await self.bot.say(\"I have no owner set.\")\n return\n owner = User.objects.get(is_owner=True)[0].id\n author = ctx.message.author\n if ctx.message.channel.is_private is False:\n server = ctx.message.server\n source = \", server **{}** ({})\".format(server.name, server.id)\n else:\n source = \", direct message\"\n sender = \"From **{}** ({}){}:\\n\\n\".format(author, author.id, source)\n message = sender + message\n try:\n await self.bot.send_message(owner, message)\n except discord.errors.InvalidArgument:\n await self.bot.say(\"I cannot send your message, I'm unable to find\"\n \" my owner... *sigh*\")\n except discord.errors.HTTPException:\n await self.bot.say(\"Your message is too long.\")\n except:\n await self.bot.say(\"I'm unable to deliver your message. Sorry.\")\n else:\n await self.bot.say(\"Your message has been sent.\")",
"def send_message(request):\n sender = request.user\n recipient_username = request.POST.get('to')\n recipient = get_user_model().objects.get(email__iexact=recipient_username)\n message = request.POST.get('message')\n if len(message.strip()) == 0:\n return Response({})\n\n if sender != recipient:\n msg = Message.send_message(sender, recipient, message)\n return Response({'message': msg})\n\n return Response({})",
"def send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want to participate in Secret Santa this year. It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)",
"def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)",
"def send(self, msg: str):\n\t\tself.client.send(msg.encode())",
"def send_email(msg):\n\tprint(\"sendEmail: \" + msg)",
"def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))",
"def send_msg(self, match_id, msg):\n endpoint = '/user/matches/%s' % match_id\n params = {\n \"message\": msg\n }\n return self.post_request(endpoint, params)",
"def sendMsg(self, chat, msg):\n try:\n self.chats[chat].SendMessage(msg)\n return \"Message sent\\n\"\n except KeyError:\n raise RuntimeError(\"No chat %s\" % chat)",
"def send(self, msg: Message, **kwargs):\n\n pass",
"def send_personal_message(msg, chat_id):\n try:\n print(f\"send message to : {chat_id}\")\n bot = telegram.Bot(token=token)\n bot.sendMessage(chat_id=chat_id, text=msg)\n return True\n except Exception as error:\n print(f\"Error in send_personal_message : {error}\")\n return False"
]
| [
"0.75077295",
"0.75053024",
"0.72851026",
"0.71197087",
"0.70939976",
"0.70884967",
"0.706698",
"0.69710916",
"0.6948751",
"0.6948751",
"0.6948751",
"0.69296354",
"0.686146",
"0.6788958",
"0.6767372",
"0.6760855",
"0.67388237",
"0.66719127",
"0.6645962",
"0.66444427",
"0.6642241",
"0.6616771",
"0.6604829",
"0.6599841",
"0.65983516",
"0.65980476",
"0.6587668",
"0.6584014",
"0.6576056",
"0.65547675"
]
| 0.75329095 | 0 |
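One design note on the `_send_msg` row above: it recurses once per sent message, so a long chat session can eventually hit CPython's default recursion limit (about 1000 frames). An equivalent iterative loop over the same assumed `send_msg` API avoids that:

```python
def send_msg_loop(user, contact, go_home):
    """Read and send messages until the user types the '0' exit sentinel."""
    while True:
        msg_content = input('{} :'.format(user.username))
        if msg_content == '0':
            return go_home()  # back to the homepage, as in the original
        user.send_msg(contact, msg_content)
```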
Endpoint to receive new device datapoint, updates top devices cache. | def devicedata():
    data = request.get_json()
    dd = DeviceData(**data)
    db.session.add(dd)
    db.session.commit()
    # update cache when write is confirmed, updating the corresponding sorted top-N lists
    num_top_devices = int(environ.get('NUM_TOP_DEVICES'))
    for feature in DeviceData.features():
        # negate feature value so ascending sort order puts the largest reading
        # first and the weakest kept reading last, for an efficient .pop()
        device_item = [-getattr(dd, feature), dd.deviceId, dd.to_dict()]
        for itv in intervals:
            key = "_".join([feature, itv])
            cache = json.loads(memcached.get(key))
            # if device already in cache, replace its entry if the new value is larger
            try:
                idx = [dd_dict["deviceId"]
                       for _, _, dd_dict in cache["minmaxes"]].index(dd.deviceId)
                if device_item < cache["minmaxes"][idx]:  # more negative == larger value
                    cache["minmaxes"].pop(idx)
                    bisect.insort(cache["minmaxes"], device_item)
                    cache["timestamp"] = dd.timestamp.isoformat()
                    memcached.set(key, json.dumps(cache))
            # otherwise, insort new item if len(cache) < NUM_TOP_DEVICES,
            # OR if it beats the weakest kept reading -> insort and pop last
            except ValueError:
                if len(cache["minmaxes"]) < num_top_devices:
                    bisect.insort(cache["minmaxes"], device_item)
                    cache["timestamp"] = dd.timestamp.isoformat()
                    memcached.set(key, json.dumps(cache))
                elif device_item < cache["minmaxes"][-1]:
                    bisect.insort(cache["minmaxes"], device_item)
                    cache["minmaxes"].pop()
                    cache["timestamp"] = dd.timestamp.isoformat()
                    memcached.set(key, json.dumps(cache))
    return jsonify(dd.to_dict()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n self.device = self._api.device_query(self._hardware_address, {})",
"def async_update_device(self) -> None:",
"def dashboard():\n features = request.args.get('features', None)\n interval = request.args.get('interval', \"all_time\")\n features = DeviceData.features() if features is None else features.split(\",\")\n\n if interval == \"past_minute\":\n cutoff_time = dt.now() - timedelta(minutes=1)\n elif interval == \"past_hour\":\n cutoff_time = dt.now() - timedelta(minutes=60)\n else:\n cutoff_time = None\n\n response = {}\n for feature in features:\n key = \"_\".join([feature, interval])\n cache = json.loads(memcached.get(key))\n\n # cache hit based on timestamp of oldest record\n cache_timestamp = dt.fromisoformat(cache[\"timestamp\"])\n if cutoff_time and cache_timestamp >= cutoff_time:\n top_readings = [dd_dict for _, _, dd_dict in cache[\"minmaxes\"]]\n\n # cache miss, query and update memcached\n else:\n num_top_devices = int(environ.get('NUM_TOP_DEVICES'))\n # get the records by max feature value, distinct on deviceId\n # max value over all time\n if cutoff_time is None:\n tr_query = db.session.query(\n DeviceData.deviceId,\n func.max(getattr(DeviceData, feature))\n ).group_by(DeviceData.deviceId)\n else:\n tr_query = db.session.query(\n DeviceData.deviceId,\n func.max(getattr(DeviceData, feature))\n ).group_by(\n DeviceData.deviceId\n ).filter(DeviceData.timestamp >= cutoff_time)\n\n # get the full records to return\n top_records = tr_query.limit(num_top_devices).all()\n device_data = DeviceData.query.filter(\n tuple_(DeviceData.deviceId, getattr(DeviceData, feature)).in_(top_records)\n )\n top_readings = sorted(\n [dd.to_dict() for dd in device_data],\n reverse=True,\n key=(lambda x: x[feature])\n )\n # update memcached values with db query results\n timestamp, array = dt.now().isoformat(), []\n if len(top_readings) > 0:\n timestamp = min(x[\"timestamp\"] for x in top_readings)\n array = [\n [-dd_dict[feature], dd_dict[\"deviceId\"], dd_dict]\n for dd_dict in top_readings]\n memcached.set(\n key,\n json.dumps({\"timestamp\": timestamp, \"minmaxes\": array})\n )\n response[feature] = top_readings\n\n return jsonify(response)",
"def update_device_list(self):\n\n # Update devices via HTTP request (basic device data - no status)\n self.__http_update_device_list()\n\n # Fetch status for each known device via MQTT\n for gdev in self.__devices.values():\n gdev.request_status()",
"async def async_update_device_info(self) -> None:\n data = await self._async_request(\"get\", \"device\")\n self._device_info = cast(Dict[str, Any], data)",
"def update(self):\n self.device.update()",
"def update(self):\n self.device.update()",
"def update(self):\n if self._device.age() > 5:\n # Only poll device if last update was more than 5 seconds ago\n self.request_temp()\n return",
"def update(self):\n url = 'https://airapi.airly.eu/v2/measurements/point' \\\n '?lat={}&lng={}&maxDistanceKM=2'.format(self._latitude,\n self._longitude)\n headers = {'Accept': CONTENT_TYPE_JSON, 'apikey': self._token}\n request = requests.get(url, headers=headers)\n _LOGGER.debug(\"New data retrieved: %s\", request.status_code)\n if request.status_code == HTTP_OK and request.content.__len__() > 0:\n if (request.json()['current']['indexes'][0]['description'] ==\n ATTR_NO_SENSOR_AVAILABLE):\n _LOGGER.error(ATTR_NO_SENSOR_AVAILABLE)\n else:\n self.get_data(request.json())",
"async def _device_refresh(self, **kwargs):\n\n device_id = self._device_id\n if not device_id:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n if self._use_channel_info:\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=_command(COMMAND_REFRESH),\n raise_for_status=False,\n ) as resp:\n if resp.status == 409:\n self._state = STStatus.STATE_OFF\n return\n resp.raise_for_status()\n await resp.json()\n\n return",
"def __http_update_device_list(self):\n\n # Make sure we are (still) logged in\n self.__login_if_required()\n\n # Fetch all devices from Govee\n req = {\n 'key': '',\n 'transaction': self.__current_milli_time(),\n 'view': 0\n }\n res = self.__http_post(req, '/device/rest/devices/v1/list')\n\n # Response:\n \"\"\"\n {\n \"devices\": [\n {\n \"device\": \"AA:BB:CC:DD:EE:FF:11:22\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"CC:DD:EE:FF:11:22\\\",\\\"bleName\\\":\\\"ihoment_H6159_XXXX\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6159\\\",\\\"device\\\":\\\"AA:BB:CC:DD:EE:FF:11:22\\\",\\\"deviceName\\\":\\\"Kitchen light\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Kitchen light\",\n \"goodsType\": 0,\n \"sku\": \"H6159\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n },\n {\n \"device\": \"A2:B2:C3:D4:E5:F6:77:88\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"C3:D4:E5:F6:77:88\\\",\\\"bleName\\\":\\\"ihoment_H6163_YYYY\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6163\\\",\\\"device\\\":\\\"A2:B2:C3:D4:E5:F6:77:88\\\",\\\"deviceName\\\":\\\"Living room\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Living room\",\n \"goodsType\": 0,\n \"sku\": \"H6163\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n }\n ],\n \"message\": \"\",\n \"status\": 200\n }\n \"\"\"\n\n # Check response status\n if res['status'] != 200:\n raise GoveeException('Govee answered with device list status {}'.format(res['status'])) \n\n for raw_device in res['devices']:\n identifier = raw_device['device']\n sku = raw_device['sku']\n if not identifier or not sku:\n continue\n name = raw_device['deviceName']\n device_settings = json.loads(raw_device['deviceExt']['deviceSettings'])\n device_settings_keys = device_settings.keys()\n if not 'address' in device_settings_keys and not 'topic' in device_settings_keys:\n continue\n topic = device_settings['topic']\n\n if identifier in self.__devices.keys():\n device = self.__devices[identifier]\n device._name = name\n else:\n device_factory = self.__get_device_factory(sku)\n if not device_factory:\n continue\n last_device_data = json.loads(raw_device['deviceExt']['lastDeviceData'])\n if 'online' in last_device_data.keys():\n if last_device_data['online']:\n iot_connected = dev.IotConnectionStatus.ONLINE\n else:\n iot_connected = dev.IotConnectionStatus.OFFLINE\n elif not 'wifiName' in device_settings:\n iot_connected = dev.IotConnectionStatus.NO_IOT\n else:\n iot_connected = dev.IotConnectionStatus.UNKNOWN\n device = device_factory.build(self, identifier, topic, sku, name, iot_connected)\n if device:\n self.__devices[identifier] = device\n self.on_new_device(self, device, raw_device)",
"def update_see(self):\n _LOGGER.debug(\"Updating device tracker: %s\", self._name)\n self._see(\n dev_id=self.dev_id,\n host_name=self.name,\n battery=self.battery,\n gps=(self.lat, self.lon),\n attributes={\n 'status': self.status,\n 'id': self.dev_id,\n 'name': self.name,\n CONF_ICON: self.icon,\n 'vendor': VENDOR,\n 'model': self.model})",
"def update(self):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('', DISCOVERY_PORT))\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(DISCOVERY_TIMEOUT.seconds)\n\n # send query to every device in every network connected to\n LOG.debug('querying hosts on networks: %s', self.networks)\n for network in self.networks:\n for address in network.hosts():\n try:\n sock.sendto(DISCOVERY_PAYLOAD,\n (str(address), DISCOVERY_PORT))\n except OSError as exc:\n LOG.debug('failed to send request', exc_info=exc)\n continue\n\n # wait for responses\n while True:\n try:\n data, _ = sock.recvfrom(64)\n except socket.timeout:\n # no (more) responses received\n break\n\n # skip our own outgoing packet\n if data == DISCOVERY_PAYLOAD:\n continue\n\n # data = ip_address,id,model\n data = data.decode('ascii').split(',')\n if len(data) < 3:\n continue\n\n entry = tuple(data)\n\n if entry not in self.entries:\n self.entries.append(entry)\n\n sock.close()",
"def update(self):\n self._device.update()",
"def _push_to_server(self) -> None:\n timestamp = int(arrow.get().float_timestamp * 1000)\n\n datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []\n\n for metric in REGISTRY.collect():\n if type(metric) == Metric and metric.type in [\"gauge\", \"counter\"]:\n if len(metric.samples) == 0:\n continue\n\n external_id = self.external_id_prefix + metric.name\n datapoints.append({\"externalId\": external_id, \"datapoints\": [(timestamp, metric.samples[0].value)]})\n\n self.cdf_client.datapoints.insert_multiple(datapoints)\n self.logger.debug(\"Pushed metrics to CDF tenant '%s'\", self._cdf_project)",
"def update_device_info(self, device_info: ChargingStationInfo) -> None:\r\n # Stop periodic requests\r\n self.stop_periodic_request()\r\n\r\n # Exchange device info\r\n self.device_info = device_info\r\n\r\n # Start periodic requests if enabled\r\n if self._periodic_request_enabled:\r\n self._polling_task = self._loop.create_task(self._periodic_request())",
"def update(self):\n self._device = self._geizhals.parse()",
"def flask_get_devices():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n # retrieve pagination\n page_number, per_page = get_pagination(request)\n\n params = {\n 'page_number': page_number,\n 'per_page': per_page,\n 'sortBy': request.args.get('sortBy', None),\n 'attr': request.args.getlist('attr'),\n 'attr_type': request.args.getlist('attr_type'),\n 'label': request.args.get('label', None),\n 'template': request.args.get('template', None),\n 'idsOnly': request.args.get('idsOnly', 'false'),\n }\n\n result = DeviceHandler.get_devices(token, params)\n LOGGER.info(f' Getting latest added device(s).')\n\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)",
"def _update_data(self) -> dict | None:\n\n return self.switchbot_api.GetSwitchbotDevices().discover(\n retry=self.retry_count, scan_timeout=self.scan_timeout\n )",
"def add(self, device_id, data):\n with self.lock:\n self.devices[device_id] = data",
"def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data",
"def test_get_measurement_history(self):\n device = DeviceFactory(node=Node.objects.first(), external_id='123', type__code=SecureDeviceType.SRT321,\n device_param__type__code=SecureDeviceParameterType.MEASURED_TEMPERATURE)\n d_id_1 = device.external_id\n\n now_loc = datetime.datetime.now(bst)\n ts_loc = now_loc - datetime.timedelta(seconds=30)\n ts_str = ts_loc.strftime('%Y-%m-%dT%H:%M:%S')\n\n data = self.create_secure_server_push_data(d_id_1, ts_str)\n\n SecureClient.process_push_data(data)\n time.sleep(.5)\n\n # get newer timestamp\n ts_str = now_loc.strftime('%Y-%m-%dT%H:%M:%S')\n data = self.create_secure_server_push_data(d_id_1, ts_str, value=\"23.5\")\n\n SecureClient.process_push_data(data)\n\n token = Token.objects.get(user__username=email)\n device_param = device.parameters.first()\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n url = reverse('api:device_measurements', kwargs={'device_parameter_id': device_param.id})\n\n time.sleep(.5)\n\n response = client.get(url, format='json')\n\n self.assertTrue(response.status_code == 200)\n self.assertTrue(len(response.data) >= 2)",
"def refreshData(self):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"refreshData() method called.\")\n\n try:\n # Check to see if there have been any devices created.\n if indigo.devices.itervalues(filter=\"self\"):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Updating data...\")\n\n for dev in indigo.devices.itervalues(filter=\"self\"):\n self.refreshDataForDev(dev)\n\n else:\n indigo.server.log(u\"No Emby Client devices have been created.\")\n\n return True\n\n except Exception as error:\n self.errorLog(u\"Error refreshing devices. Please check settings.\")\n self.errorLog(unicode(error))\n return False",
"def update(self) -> None:\n\n try:\n request = requests.get(self.url, timeout=10)\n except requests.exceptions.RequestException as err:\n _LOGGER.error(\"No connection to endpoint: %s\", err)\n else:\n doc = xmltodict.parse(request.text)\n mtus = int(doc[\"LiveData\"][\"System\"][\"NumberMTU\"])\n\n for mtu in range(1, mtus + 1):\n power = int(doc[\"LiveData\"][\"Power\"][\"MTU%d\" % mtu][\"PowerNow\"])\n voltage = int(doc[\"LiveData\"][\"Voltage\"][\"MTU%d\" % mtu][\"VoltageNow\"])\n\n self.data[mtu] = {\n UnitOfPower.WATT: power,\n UnitOfElectricPotential.VOLT: voltage / 10,\n }",
"def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None",
"async def async_update(self) -> None:\n await self._api.async_update_device_info()",
"async def get_device_list(self):\n self.logger.debug(\"Retrieving device list information.\")\n #url = 'https://{}/api/user/device'.format(self.apiHost) #suddenly stopped worrking, so use\n '''\n #full version\n url = 'https://{}/api/user/device?lang=en&apiKey={}&getTags=1&version={}&ts={}&nonce={}&appid={}&imei={}&os={}&model={}&romVersion={}&appVersion={}'.format(self.apiHost,\n self.apikey,\n self.timestamp,\n self._version,\n self._nonce,\n self._appid,\n self._imei,\n self._os,\n self._model,\n self._romVersion,\n self._appVersion)\n '''\n url = 'https://{}/api/user/device?version={}&appid={}'.format(self.apiHost, self._version, self._appid)\n headers = {\n 'Authorization': 'Bearer %s' % self.authenticationToken,\n }\n self.logger.debug('url: %s, headers: %s' % (url, headers))\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n json_response = await response.json()\n \n self.logger.debug('received response status: %s' % response.status) \n self.logger.debug('received response: %s' % self.pprint(json_response))\n if response.status != 200:\n self.logger.error('error: %s received' % response.status)\n return\n \n if json_response.get(\"devicelist\"):\n self.logger.info('New response format found')\n json_response = json_response[\"devicelist\"]\n \n self.logger.debug('number of device(s) is: %d' % len(json_response))\n \n self._devices = json_response #list of devices and current configurations\n \n self._create_client_devices()\n \n '''\n Example Response:\n [\n {\n \"__v\": 0,\n \"_id\": \"5becffa6d2b4a3c34cb79b38\",\n \"apikey\": \"530303a6-cf2c-4246-894c-xxxxxxxxxxx\",\n \"brandName\": \"AUTOSLIDE\",\n \"createdAt\": \"2018-11-15T05:09:58.341Z\",\n \"deviceStatus\": \"\",\n \"deviceUrl\": \"\",\n \"deviceid\": \"100050xxxxx\",\n \"devicekey\": \"4123ec79-d2c3-4d32-930a-xxxxxxxxxxxxx\",\n \"extra\": {\n \"_id\": \"xxxxxxxxxxxxxxxx\",\n \"extra\": {\n \"apmac\": \"xx:xx:xx:xx:xx:xx\",\n \"brandId\": \"5a6fcf00f620073c67efc280\",\n \"description\": \"20180813001\",\n \"mac\": \"xx:xx:xx0:xx:xx:xx\",\n \"manufacturer\": \"\\u9752\\u5c9b\\u6fb3\\u601d\\u5fb7\\u667a\\u80fd\\u95e8\\u63a7\\u7cfb\\u7edf\\u6709\\u9650\\u516c\\u53f8\",\n \"model\": \"PSA-BTA-GL\",\n \"modelInfo\": \"5af3f5332c8642b001540dac\",\n \"ui\": \"\\u63a8\\u62c9\\u5ba0\\u7269\\u95e8\",\n \"uiid\": 54\n }\n },\n \"group\": \"\",\n \"groups\": [],\n \"ip\": \"xxx.xx.xx.xxx\",\n \"location\": \"\",\n \"name\": \"Patio Door\",\n \"offlineTime\": \"2018-12-31T07:23:31.018Z\",\n \"online\": true,\n \"onlineTime\": \"2018-12-31T12:19:33.216Z\",\n \"params\": {\n \"a\": \"3\",\n \"b\": \"3\",\n \"c\": \"1\",\n \"d\": \"1\",\n \"e\": \"1\",\n \"f\": \"1\",\n \"fwVersion\": \"2.0.2\",\n \"g\": \"0\",\n \"h\": \"1\",\n \"i\": \"0\",\n \"j\": \"00\",\n \"k\": \"0\",\n \"l\": \"1\",\n \"m\": \"2\",\n \"n\": \"0\",\n \"rssi\": -53,\n \"staMac\": \"xx:xx:xx:xx:xx:xx\"\n },\n \"productModel\": \"WFA-1\",\n \"settings\": {\n \"alarmNotify\": 1,\n \"opsHistory\": 1,\n \"opsNotify\": 0\n },\n \"sharedTo\": [\n {\n \"note\": \"\",\n \"permit\": 15,\n \"phoneNumber\": \"[email protected]\",\n \"shareTime\": 1542259546087\n }\n ],\n \"showBrand\": true,\n \"type\": \"10\",\n \"uiid\": 54\n }\n ]\n \n or New format:\n {\n \"devicelist\": [\n {\n \"__v\": 0,\n \"_id\": \"5c3665d012d28ae6ba4943c8\",\n \"apikey\": \"530303a6-cf2c-4246-894c-50855b00e6d8\",\n \"brandLogoUrl\": \"https://us-ota.coolkit.cc/logo/KRZ54OifuGmjoEMxT1YYM3Ybu2fj5K2C.png\",\n \"brandName\": \"Sonoff\",\n \"createdAt\": 
\"2019-01-09T21:21:20.402Z\",\n \"devConfig\": {},\n \"devGroups\": [],\n \"deviceStatus\": \"\",\n ... as before\n '''",
"def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp",
"def update(self):\n self.dht_client.update()\n temperature_offset = self.temperature_offset\n humidity_offset = self.humidity_offset\n data = self.dht_client.data\n\n sensor_type = self.entity_description.key\n if sensor_type == SENSOR_TEMPERATURE and sensor_type in data:\n temperature = data[SENSOR_TEMPERATURE]\n _LOGGER.debug(\n \"Temperature %.1f \\u00b0C + offset %.1f\",\n temperature,\n temperature_offset,\n )\n if -20 <= temperature < 80:\n self._attr_native_value = round(temperature + temperature_offset, 1)\n elif sensor_type == SENSOR_HUMIDITY and sensor_type in data:\n humidity = data[SENSOR_HUMIDITY]\n _LOGGER.debug(\"Humidity %.1f%% + offset %.1f\", humidity, humidity_offset)\n if 0 <= humidity <= 100:\n self._attr_native_value = round(humidity + humidity_offset, 1)",
"def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()"
]
| [
"0.61019975",
"0.5817862",
"0.58065313",
"0.5732114",
"0.57284003",
"0.561221",
"0.561221",
"0.5596885",
"0.55961233",
"0.5561533",
"0.5548863",
"0.5475346",
"0.5436901",
"0.5418248",
"0.53638417",
"0.52888125",
"0.5253732",
"0.5250218",
"0.5242439",
"0.5213964",
"0.52058274",
"0.51947016",
"0.51735026",
"0.5159292",
"0.5137959",
"0.51288307",
"0.51215047",
"0.51176417",
"0.50942767",
"0.5076267"
]
| 0.64702433 | 0 |
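The negatives in the row above all implement one shared pattern: authenticate, fetch device state over HTTP, and fold the response into a local registry keyed by device id. A minimal sketch of that pattern, assuming a plain REST endpoint; `API_URL`, `DeviceCache`, and the response shape are illustrative and not taken from any snippet:

```python
import requests

API_URL = "https://example.invalid/api/devices"  # placeholder endpoint

class DeviceCache:
    """Local registry refreshed by polling, as in the snippets above."""

    def __init__(self):
        self.devices = {}

    def refresh(self, token, timeout=10.0):
        # Fetch the full device list and merge it into the cache.
        resp = requests.get(
            API_URL,
            headers={"Authorization": "Bearer %s" % token},
            timeout=timeout,
        )
        resp.raise_for_status()
        for raw in resp.json().get("devices", []):
            device_id = raw.get("device")
            if device_id:  # skip records without an identifier
                self.devices[device_id] = raw
```

Status-specific branches (the 409 handling in the SmartThings snippet, the 401 handling in the MyQ one) would hang off `resp.status_code` before `raise_for_status()`.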
Retrieve top devices per feature over interval | def dashboard():
features = request.args.get('features', None)
interval = request.args.get('interval', "all_time")
features = DeviceData.features() if features is None else features.split(",")
if interval == "past_minute":
cutoff_time = dt.now() - timedelta(minutes=1)
elif interval == "past_hour":
cutoff_time = dt.now() - timedelta(minutes=60)
else:
cutoff_time = None
response = {}
for feature in features:
key = "_".join([feature, interval])
        cached = memcached.get(key)
        cache = json.loads(cached) if cached else None
        # cache hit: the oldest cached record must still fall inside the interval
        if cache and cutoff_time and dt.fromisoformat(cache["timestamp"]) >= cutoff_time:
top_readings = [dd_dict for _, _, dd_dict in cache["minmaxes"]]
# cache miss, query and update memcached
else:
num_top_devices = int(environ.get('NUM_TOP_DEVICES'))
# get the records by max feature value, distinct on deviceId
# max value over all time
if cutoff_time is None:
tr_query = db.session.query(
DeviceData.deviceId,
func.max(getattr(DeviceData, feature))
).group_by(DeviceData.deviceId)
else:
tr_query = db.session.query(
DeviceData.deviceId,
func.max(getattr(DeviceData, feature))
).group_by(
DeviceData.deviceId
).filter(DeviceData.timestamp >= cutoff_time)
            # order by the aggregated value so LIMIT returns the true top devices
            top_records = tr_query.order_by(
                func.max(getattr(DeviceData, feature)).desc()
            ).limit(num_top_devices).all()
            # get the full records to return
device_data = DeviceData.query.filter(
tuple_(DeviceData.deviceId, getattr(DeviceData, feature)).in_(top_records)
)
top_readings = sorted(
[dd.to_dict() for dd in device_data],
reverse=True,
key=(lambda x: x[feature])
)
# update memcached values with db query results
timestamp, array = dt.now().isoformat(), []
if len(top_readings) > 0:
timestamp = min(x["timestamp"] for x in top_readings)
array = [
[-dd_dict[feature], dd_dict["deviceId"], dd_dict]
for dd_dict in top_readings]
memcached.set(
key,
json.dumps({"timestamp": timestamp, "minmaxes": array})
)
response[feature] = top_readings
return jsonify(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ten_most_popular_devices(self):\n devices = DeviceFactory.create_batch(15)\n dates = create_dates_in_range(0, 2, '2017-05-01T00:{}:40Z')\n create_occurrences(devices[0], dates)\n\n dates = create_dates_in_range(0, 6, '2017-05-01T00:{}:40Z')\n create_occurrences(devices[1], dates)\n\n dates = create_dates_in_range(0, 10, '2017-05-01T00:{}:40Z')\n create_occurrences(devices[2], dates)\n\n p = PopularDevicesView()\n popular_devices_list = p.get_ten_most_popular_devices(\n parse_datetime('2017-05-01T00:10:40Z').date())\n\n self.assertEqual(devices[2].device_ref,\n popular_devices_list[0].device_ref)\n self.assertEqual(devices[1].device_ref,\n popular_devices_list[1].device_ref)\n self.assertEqual(devices[0].device_ref,\n popular_devices_list[2].device_ref)",
"def devicedata():\n data = request.get_json()\n\n dd = DeviceData(**data)\n db.session.add(dd)\n db.session.commit()\n\n # update cache when write is confirmed, updates corresponding maxheaps\n num_top_devices = int(environ.get('NUM_TOP_DEVICES'))\n for feature in DeviceData.features():\n # negate feature value to keep list reversed for efficient .pop()\n device_item = [-getattr(dd, feature), dd.deviceId, dd.to_dict()]\n for itv in intervals:\n key = \"_\".join([feature, itv])\n cache = json.loads(memcached.get(key))\n\n # if device already in cache, replace val if larger (pop & insort)\n try:\n idx = [dd_dict[\"deviceId\"]\n for _, _, dd_dict in cache[\"minmaxes\"]].index(dd.deviceId)\n if device_item > cache[\"minmaxes\"][idx]:\n cache[\"minmaxes\"].pop(idx)\n bisect.insort(cache[\"minmaxes\"], device_item)\n\n # otherwise, insort new item if len(cache) < NUM_TOP_DEVICES\n # OR if device_item > minmaxes[0]. -> insort and pop last\n except ValueError:\n if len(cache[\"minmaxes\"]) < num_top_devices:\n bisect.insort(cache[\"minmaxes\"], device_item)\n cache[\"timestamp\"] = dd.timestamp.isoformat()\n memcached.set(key, json.dumps(cache))\n elif device_item > cache[\"minmaxes\"][0]:\n bisect.insort(cache[\"minmaxes\"], device_item)\n cache[\"minmaxes\"].pop()\n cache[\"timestamp\"] = dd.timestamp.isoformat()\n memcached.set(key, json.dumps(cache))\n\n return jsonify(dd.to_dict())",
"def getTopDevices(self):\n logger.debug('Getting the list of Top Devices...')\n elements = get_elements_by_css(\"a[data-query-prepend='device eq']\")\n devices = []\n for element in elements:\n devices.append(get_text(element))\n return devices",
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])",
"def get_top10(dataset, contrib_type):\n return dataset.order_by('-{0}'.format(contrib_type))[:10]",
"def task_6c(bt_df, rt_df, fg_df):\n top_users = rt_df.groupBy(rt_df.user_id).count().sort('count', ascending=False).take(20)\n return list(map(lambda row: (row['user_id'], row['count']), top_users))",
"def request_device_readings_median(device_uuid):\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('''\n SELECT AVG(value) FROM (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n '''.format(device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200",
"def categorize_data(data, top_count):\n sorted_by_tcp = sorted(\n data, key=lambda x: x['TCP Utilization'], reverse=True\n )[0:top_count]\n sorted_by_udp = sorted(\n data, key=lambda x: x['UDP Utilization'], reverse=True\n )[0:top_count]\n\n print(f\"\\nTOP-{top_count} port flooders by TCP\")\n print(tabulate(sorted_by_tcp, headers='keys', tablefmt=\"psql\"))\n print(f\"\\nTOP-{top_count} port flooders by UDP\")\n print(tabulate(sorted_by_udp, headers='keys', tablefmt=\"psql\"))",
"def get_top_rating_service(top, offset, uid):\n if top is None or not top.isdigit():\n top = DEFAULT_TOP\n if offset is None or not offset.isdigit():\n offset = DEFAULT_OFFSET\n return update_protocol_for_a_list_of_dict('image', rating_dao.get_top_rating_dao(int(top), int(offset)), uid)",
"def topic_get_hot(request, limit):\n if request.method == 'GET':\n all_topics = Topic.objects.all()\n\n if int(limit) == 0:\n hot_topics = sorted(all_topics, key=lambda t: -t.hotness)\n else:\n hot_topics = sorted(all_topics, key=lambda t: -t.hotness)[:int(limit)]\n #hot_topics = Topic.objects.order_by('hotness')[:5]\n TopicNestedSerializer.Meta.depth = 2\n RelationSerializer.Meta.depth = 1\n serializer = TopicNestedSerializer(hot_topics, many=True)\n return Response(serializer.data)",
"def sample_top_neighbors( self, max_count=200 ):\n df = self.copy()\n return df[df['neighbor'] <= max_count].coerce()",
"def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False):\n values = [\"count\", \"sumSpeed\"]\n result = None\n for v in values:\n # data = query_ensor(sensorURI, fromTime, toTime, v)\n data = multiday_query(sensorURI, fromTime, toTime, v)\n df = pd.DataFrame(data, columns=[\"measuredTime\", v])\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.index = df[\"measuredTime\"]\n del df[\"measuredTime\"]\n if remove_outliers:\n z_scores = np.abs(stats.zscore(df))\n print(f\"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}\")\n df = df[(z_scores < 3).all(axis=1)]\n if resampleFreq is not None:\n df = df.resample(resampleFreq).sum()\n if result is not None:\n result = pd.merge_ordered(result, df, left_on=\"measuredTime\", right_on=\"measuredTime\")\n result.index = result[\"measuredTime\"]\n del result[\"measuredTime\"]\n else:\n result = df\n # avg speed\n result[\"avgSpeed\"] = result[\"sumSpeed\"] / result[\"count\"]\n result.loc[~np.isfinite(result[\"avgSpeed\"]), \"avgSpeed\"] = np.nan\n result[\"avgSpeed\"] = result[\"avgSpeed\"].interpolate()\n return result",
"def topdia(x):\r\n return Feature(x, \"TopDia\")",
"def request_device_readings_max(device_uuid):\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # Execute the query\n cur.execute('select MAX(value) from readings where device_uuid=\"{}\"'.format(device_uuid))\n rows = cur.fetchall()\n\n # Return the JSON\n return jsonify([dict(zip(['value'], row)) for row in rows]), 200",
"def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops",
"def _tail_profile(self, db, interval):\r\n latest_doc = None\r\n while latest_doc is None:\r\n time.sleep(interval)\r\n latest_doc = db['system.profile'].find_one()\r\n\r\n current_time = latest_doc['ts']\r\n\r\n while True:\r\n time.sleep(interval)\r\n cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)\r\n for doc in cursor:\r\n current_time = doc['ts']\r\n yield doc",
"def request_device_readings_quartiles(device_uuid):\n\n # Set the db that we want and open the connection\n start = request.args.get('start')\n end = request.args.get('end')\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n #check for start\n if start != None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, end, \n device_uuid, \n start, end, \n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end,\n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start != None and end == None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, \n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT 
AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n end, \n device_uuid, \n end, \n device_uuid, \n device_uuid, \n end,\n device_uuid, \n end,\n device_uuid, \n device_uuid, \n end,\n device_uuid, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end == None:\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n ) as T3\n '''.format(device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200",
"def topGenes(X,Y,feature_name,class_len, feature_len, method, nb_samples, device, net): \n \n input_x = torch.from_numpy(X).float().to(device)\n if method == 'Shap':\n print(\"Running Shap Model... (It may take a long time)\")\n nb_samples = nb_samples\n rand_index = np.random.choice(input_x.shape[0], nb_samples, replace=True)\n background = input_x[rand_index]\n Y_rand = Y[rand_index].reshape(-1,1)\n Y_unique,Y_counts = np.unique(Y_rand,return_counts=True)\n # Create object that can calculate shap values and explain predictions of the model\n explainer = shap.DeepExplainer(net.encoder, background)\n # Calculate Shap values, with dimension (y*N*x) y:number of labels, N number of background samples, x number of features\n shap_values = explainer.shap_values(background)\n if method =='Captum_ig':\n baseline = torch.zeros((X.shape)).to(device)\n ig = IntegratedGradients(net.encoder)\n attributions, delta = ig.attribute(input_x, baseline, target=0, return_convergence_delta=True)\n if method =='Captum_dl':\n baseline = torch.zeros((X.shape)).to(device)\n dl = DeepLift(net.encoder)\n attributions, delta = dl.attribute(input_x, baseline, target=0, return_convergence_delta=True) \n if method =='Captum_gs':\n baseline_dist = (torch.randn((X.shape))* 0.001).to(device)\n gs = GradientShap(net.encoder)\n attributions, delta = gs.attribute(input_x, stdevs=0.09, n_samples=10, \\\n baselines=baseline_dist, target=0, return_convergence_delta=True) \n \n # Use the weight differences to do rank\n if class_len ==2:\n class_len = 1\n feature_rank = np.empty((feature_len,2*class_len), dtype=object) #save ranked features and weights\n # one class vs others\n for class_index in range(class_len):\n attributions_mean_list =[]\n Y_i = Y.copy()\n Y_i[ Y_i != class_index ] = class_index+1 # change to 2 class\n Y_unique,Y_counts = np.unique(Y_i,return_counts=True)\n # repeat 2 times\n for i in Y_unique:\n if method =='Shap':\n attributions_i = torch.from_numpy(shap_values[i]).float().to(device)\n else:\n attributions_i = attributions[Y_i==i] # find all X of each class\n attributions_mean = torch.mean(attributions_i, dim =0) \n attributions_mean_list.append(attributions_mean)\n # class_weight differences \n class_weight = attributions_mean_list[0] - attributions_mean_list[1] \n attributions_weight, index_sorted = torch.sort(class_weight, descending= True)\n attributions_name = np.array([feature_name[x] for x in index_sorted])\n attributions_weight = attributions_weight.detach().cpu()\n feature_rank[:,class_index*2 ] = attributions_name\n feature_rank[:,class_index*2+1 ] = attributions_weight \n \n # Save results as DAtaFrame \n mat_head = np.array(['topGenes' if x%2==0 else 'Weights' for x in range(class_len*2)])\n mat_head = mat_head.reshape(1,-1)\n mat = np.r_[mat_head ,feature_rank ]\n mat[1:, 1] = mat[1:,1]/float(mat[1,1])\n columns = ['Class'+str(int(x/2)+1) for x in range(class_len*2)] \n ind_df = ['Attributes']+ [str(x) for x in range(feature_len)]\n res = pd.DataFrame(mat,index=ind_df,columns=columns)\n return res",
"async def get_device_info_w_slice(session: ClientSession, graphql_instance: object, **kwargs: object) -> list:\n request_results = []\n sub_request_kwargs = kwargs\n req_num = sub_request_kwargs.pop(\"req_num\", 0)\n\n sub_request_kwargs[\"category\"] = graphql_instance.__class__.__name__\n sub_request_kwargs[\"req_num\"] = f\"{req_num}\"\n\n http_response = await asyncio.create_task(send_request(session, **sub_request_kwargs))\n\n if not http_response:\n cur_devices = sub_request_kwargs[\"json\"][\"variables\"][\"productIds\"]\n if len(cur_devices) > 1:\n parts = ([\n cur_devices[:len(cur_devices) // 2],\n cur_devices[len(cur_devices) // 2:]\n ])\n for i, part in enumerate(parts):\n sub_request_kwargs_copy = copy.deepcopy(sub_request_kwargs)\n sub_request_kwargs_copy[\"json\"][\"variables\"][\"productIds\"] = part\n sub_request_kwargs_copy[\"req_num\"] = f\"{req_num}.{i}\"\n request_results.append(await get_device_info_w_slice(\n session,\n graphql_instance,\n **sub_request_kwargs_copy\n ))\n else:\n request_results.append(http_response)\n return request_results",
"def top_groups():\n groups = Group.objects.filter(country='PT').order_by('-members')[:10]\n df = pd.DataFrame.from_records(groups.values())\n return df",
"async def get_top(self, limit, iterator):\n if iterator == 'ITERATOR_LE':\n iterator = 'DESC'\n if iterator == 'ITERATOR_GE':\n iterator = 'ASC'\n sql = (\n \"select data from stickers order by (data->>1)::int {} limit {}\"\n ).format(iterator, limit)\n result = await self.db.fetch(sql)\n ret = []\n for i in result:\n ret.append(json.loads(i[0]))\n return ret",
"async def get_top_trending_tags_summary():\n # Same results, more overhead:\n #return [tag['name'] for tag in await get_trending_tags('', 50)]\n sql = \"\"\"\n SELECT category\n FROM hive_posts_cache\n WHERE is_paidout = '0'\n GROUP BY category\n ORDER BY SUM(payout) DESC\n LIMIT 50\n \"\"\"\n return query_col(sql)",
"def colocationEpochFeats(cur,uid,timestamp):\n\tj=0\n\thour=3600\n\tnearby_dev = []\n\tcloser_dev = []\n\tfor i in range(1,8):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour3\n\t\the_timestamp = timestamp-86400+i*hour3\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\n\t\t\tcur.execute(\"SELECT time_stamp,mac,level FROM {0} WHERE time_stamp>= {1} AND time_stamp<={2}\"\n\t\t\t\t.format(uid+'bt',hs_timestamp,he_timestamp))\n\n\t\t\trecords = cur.fetchall()\n\t\t\t# In every BT scan all MACs share the same timestamp, thus the number of MACs\n\t\t\t# at each given time reveals the number of nearby devices which we assume \n\t\t\t# is positively correlated with the number of humans around the user.\n\t\t\t# A distinction between nearby and closer-to-user devices is being made\n\t\t\t# based on signal strength threshold\n\t\t\ttimes_near = [item[1] for item in records if item[2]<-80]\n\t\t\tnearby_dev.append( len(set(times_near)))\n\n\t\t\ttimes_closer = [item[1] for item in records if item[2]>=-80]\n\t\t\tcloser_dev.append(len(set(times_closer)))\n\n\tbt_feats = np.hstack((closer_dev,nearby_dev))\n\treturn(bt_feats)",
"def get_devices(token, params, sensitive_data=False):\n tenant = init_tenant_context(token, db)\n\n pagination = {'page': params.get('page_number'), 'per_page': params.get('per_page'), 'error_out': False}\n\n SORT_CRITERION = {\n 'label': Device.label,\n None: Device.id\n }\n sortBy = SORT_CRITERION.get(params.get('sortBy'))\n\n attr_filter = []\n query = params.get('attr')\n\n for attr_label_item in query:\n parsed = re.search('^(.+){1}=(.+){1}$', attr_label_item)\n attr_label = []\n attr_label.append(DeviceAttr.label == parsed.group(1))\n # static value must be the override, if any\n attr_label.append(text(\"coalesce(overrides.static_value, attrs.static_value)=:static_value \").bindparams(static_value=parsed.group(2)))\n attr_filter.append(and_(*attr_label))\n\n query = params.get('attr_type')\n for attr_type_item in query:\n attr_filter.append(DeviceAttr.value_type == attr_type_item)\n\n label_filter = []\n target_label = params.get('label')\n if target_label:\n label_filter.append(Device.label.like(\"%{}%\".format(target_label)))\n\n template_filter = []\n target_template = params.get('template')\n if target_template:\n template_filter.append(DeviceTemplateMap.template_id == target_template)\n\n if (attr_filter): #filter by attr\n LOGGER.debug(f\" Filtering devices by {attr_filter}\")\n\n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) \\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .filter(*attr_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n\n elif label_filter or template_filter: # only filter by label or/and template\n if label_filter:\n LOGGER.debug(f\"Filtering devices by label: {target_label}\")\n\n if template_filter:\n LOGGER.debug(f\"Filtering devices with template: {target_template}\") \n \n page = db.session.query(Device) \\\n .join(DeviceTemplateMap, isouter=True)\n\n if sensitive_data: #aditional joins for sensitive data\n page = page.join(DeviceTemplate) \\\n .join(DeviceAttr, isouter=True) \\\n .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True)\n\n page = page.filter(*label_filter) \\\n .filter(*template_filter) \\\n .order_by(sortBy) \\\n .paginate(**pagination)\n\n else:\n LOGGER.debug(f\" Querying devices sorted by device id\")\n page = db.session.query(Device).order_by(sortBy).paginate(**pagination)\n\n devices = []\n \n if params.get('idsOnly').lower() in ['true', '1', '']:\n return DeviceHandler.get_only_ids(page)\n\n for d in page.items:\n devices.append(serialize_full_device(d, tenant, sensitive_data))\n\n\n result = {\n 'pagination': {\n 'page': page.page,\n 'total': page.pages,\n 'has_next': page.has_next,\n 'next_page': page.next_num\n },\n 'devices': devices\n }\n\n return result",
"def get_device_summary(dataframe):\n\n print(\"Total number of points: \", len(dataframe))\n\n print(\"The number of rows from each device are as follows: \", dataframe.groupby(['device_id']).size())",
"def get_top_features(self, topFeaturesNumber, possibleFeatures, featureRequests):\n # Make dictionary of feature value pair\n possibleFeaturesMap = dict()\n for feature in possibleFeatures:\n # Initialize the value of given feature to 0\n possibleFeaturesMap[feature] = 0\n for request in featureRequests:\n # Increment the count of possibleFeature item\n possibleFeaturesMap[feature] += (request.lower()).count(feature)\n c = Counter(possibleFeaturesMap)\n # returns top topFeaturesNumber values\n return c.most_common(topFeaturesNumber)",
"def statistical_feature_extraction(window_size, signal, axis, device, subject_ID):\n\n start_running = timeit.default_timer()\n try:\n directory = f'data/row_data/{device}_{signal}/S{subject_ID}_{device}_{signal}.csv'\n sampling_rate = 20\n window_size = int(sampling_rate * window_size)\n # print(window_size)\n except:\n print('Error! Can not find such directory.')\n\n raw_signal = pd.read_csv(directory)\n win_count = 0\n total_win_count = 0\n features_for_all_windows_one_activity = []\n features_for_all_windows_all_activities = []\n column_title = f'{axis}_{device}_{signal}'\n for class_label in np.append(range(1, 14), range(15, 20)):\n activity_ID = chr(class_label + 64)\n raw_data_one_activity = np.array(raw_signal.loc[raw_signal['activity_ID'] == activity_ID, [column_title]])\n raw_data_one_activity = pd.DataFrame(raw_data_one_activity)\n\n for data_point in range(0, len(raw_data_one_activity), window_size):\n win_count += 1\n start = data_point\n end = start + window_size\n time_domain_window = raw_data_one_activity[start:end]\n\n time_mean = pd.Series(time_domain_window.mean()).rename(f'{axis}_{signal}_mean')\n time_min = pd.Series(time_domain_window.min()).rename(f'{axis}_{signal}_min')\n time_max = pd.Series(time_domain_window.max()).rename(f'{axis}_{signal}_max')\n time_std = pd.Series(time_domain_window.std()).rename(f'{axis}_{signal}_std')\n time_median = pd.Series(time_domain_window.median()).rename(f'{axis}_{signal}_median')\n time_variance = pd.Series(time_domain_window.var()).rename(f'{axis}_{signal}_variance')\n zero_crossing_rate = pd.Series(zero_crossing(time_domain_window)).rename(\n f'{axis}_{signal}_zero_crossing')\n mean_crossing = pd.Series(mean_crossing_rate(time_domain_window)).rename(\n f'{axis}_{signal}_mean_crossing')\n activity_id_ = pd.Series(activity_ID).rename('Activity_ID')\n\n features_for_one_window_one_activity = pd.concat(\n [time_mean, time_min, time_max, time_std, time_median, time_variance, zero_crossing_rate, mean_crossing,\n activity_id_], axis=1)\n features_for_all_windows_one_activity.append(features_for_one_window_one_activity)\n # print(features_for_all_windows)\n\n print('Window count', win_count)\n total_win_count += win_count\n win_count = 0\n features_for_all_windows_all_activities.append(features_for_all_windows_one_activity)\n features = pd.concat(features_for_all_windows_all_activities[0], ignore_index=False)\n print(features)\n save_as_directory = f'feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_{axis}_{device}_{signal}.csv'\n features.to_csv(save_as_directory, encoding='utf-8', index=False)\n finish_running = timeit.default_timer()\n print('Total number of windows: ', total_win_count)\n print('Running time: ', finish_running - start_running)",
"def topCountries(top=10):\r\n #top 10 deadly countries\r\n countries = agg('country')[:top].index\r\n #grab aggregated data for these countries\r\n dataOfTop10 = agg(['year','country']).query(\"country in @countries\")### interesting...\r\n #unstack data\r\n dataOfTop10 = dataOfTop10.unstack(1)\r\n #remove multiindexes\r\n dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose()\r\n #sort by year\r\n dataOfTop10.sort_index(inplace=True)\r\n return dataOfTop10",
"def get_device_list_odu_profiling(ip_address, mac_address, selected_device):\n # This is a empty list variable used for storing the device list\n device_list = []\n device_type = selected_device\n device_list_state = \"enabled\"\n global sqlalche_obj\n # try block starts\n try:\n # here we create the session of sqlalchemy\n sqlalche_obj.sql_alchemy_db_connection_open()\n # this is the query which returns the multidimensional array of hosts\n # table and store in device_tuple\n device_tuple = sqlalche_obj.session.query(\n Hosts.host_id, Hosts.host_alias, Hosts.ip_address, Hosts.mac_address).filter(and_(Hosts.is_deleted == 0, Hosts.ip_address.like('%s%%' % (ip_address)),\n Hosts.mac_address.like('%s%%' % (mac_address)), Hosts.device_type_id == device_type)).order_by(Hosts.host_alias).order_by(Hosts.ip_address).all()\n sqlalche_obj.sql_alchemy_db_connection_close()\n total_record = len(device_tuple)\n\n # this loop create a mutildimesional list of host\n if total_record == 0:\n return 0\n elif total_record > 1:\n return 1\n else:\n return str(device_tuple[0][0])\n\n # try block ends\n\n # exception starts\n except Exception as e:\n sqlalche_obj.sql_alchemy_db_connection_close()\n return 2\n finally:\n sqlalche_obj.sql_alchemy_db_connection_close()"
]
| [
"0.60265946",
"0.594315",
"0.58434504",
"0.5637146",
"0.5609201",
"0.5511789",
"0.53965575",
"0.533939",
"0.52934563",
"0.52539307",
"0.52156544",
"0.5195858",
"0.51910466",
"0.51692164",
"0.5161478",
"0.5159812",
"0.5155001",
"0.5145687",
"0.51430875",
"0.512167",
"0.5112263",
"0.51061493",
"0.509415",
"0.5078009",
"0.5064617",
"0.50611514",
"0.5051777",
"0.5035521",
"0.5020734",
"0.50181466"
]
| 0.69374967 | 0 |
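The `dashboard()` document above and its `devicedata()` negative share one cache contract: per `feature_interval` key, memcached holds a timestamp plus a list of `[-value, deviceId, record]` triples kept sorted so the largest reading comes first. A minimal sketch of that update step, assuming any memcached client with `get`/`set`; `update_top_n` and `n` are names invented here:

```python
import bisect
import json

def update_top_n(client, key, record, feature, n=10):
    """Insert one reading into the per-(feature, interval) leaderboard."""
    raw = client.get(key)
    cache = json.loads(raw) if raw else {"timestamp": record["timestamp"],
                                         "minmaxes": []}
    # Negate the value so the largest reading sorts to the front; the
    # (-value, deviceId) prefix is assumed unique, so comparisons never
    # fall through to the record dict.
    item = [-record[feature], record["deviceId"], record]
    bisect.insort(cache["minmaxes"], item)
    del cache["minmaxes"][n:]  # trim to the n largest readings
    client.set(key, json.dumps(cache))
```

Storing the list pre-sorted is what lets the read path serve `past_minute`/`past_hour` requests without touching the database when the cached timestamp is recent enough.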
Ask the user for a zip code, then print the matching town name and population | def run_script():
    import urllib  # needed for urlopen; the original relied on an implicit import
    var = raw_input("Enter a Zipcode: ")
    address = 'http://www.uszip.com/zip/' + var
    conn = urllib.urlopen(address)
    for line in conn.fp:
        line = line.strip()
        if '<title>' in line:
            # title looks like '<title>Town, ST zip code</title>'
            print line[7:-16]
        if 'Total population' in line:
            loc = line.index('Total population')
            loc2 = line.index('<span')
            print line[(loc + 25):loc2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0",
"def database():\n\n zip_to_tuple, town_to_zip = query()\n\n l = ''\n\n while l != quit:\n\n l = input(\"Type in a zip code, a town name, or quit:\\n\")\n\n if l == 'quit':\n break\n\n elif l.isdigit(): # zip code\n\n if l in zip_to_tuple:\n for i in zip_to_tuple[l]:\n try:\n print (format(i[0], i[1], i[2], i[3], i[4]))\n except:\n print (\"Cannot print line due to enconding problems.\\n\")\n\n else:\n print (\"Zip code {0} is not on the database.\\n\".format(l))\n\n else: # town name\n\n if l in town_to_zip:\n\n for i in town_to_zip[l]:\n for j in zip_to_tuple[i]:\n try:\n print (format(j[0], j[1], j[2], j[3], j[4]))\n except:\n print (\"Cannot print line due to enconding problems.\\n\")\n\n else:\n print (\"Town {0} is not on the database.\\n\".format(l))",
"def get_zipsearch(zipcode=u''):\n from x84.bbs import getterminal, LineEditor, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow(u' -'),\n term.reverse_yellow(u':'),\n u' ')))\n return LineEditor(width=min(30, term.width - 5), content=zipcode).read()",
"def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")",
"def geocode_zip():\n\n # Get user location \n zipcode = request.args.get('zipcode')\n location_result = client.geocode(zipcode)\n\n # Save needed geolocation in the session\n session['lat'] = location_result[\"results\"][0][\"location\"][\"lat\"]\n session['lng']= location_result[\"results\"][0][\"location\"][\"lng\"]\n\n city = location_result[\"results\"][0][\"address_components\"][\"city\"]\n state = location_result[\"results\"][0][\"address_components\"][\"state\"]\n session['user_facing_location'] = city + \", \" + state\n\n return jsonify(location_result)",
"def get_address():\r\n address = input(\"What is the customer's address?: \")\r\n\r\n return address",
"def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }",
"def print_bar_code(zip_code):\r\n zip_sum = 0\r\n check_digit = 0\r\n\r\n print(\"|\", end=\"\")\r\n\r\n for i in zip_code:\r\n i = int(i)\r\n print_digit(i)\r\n zip_sum += i\r\n\r\n while zip_sum % 10 != 0:\r\n check_digit += 1\r\n zip_sum += 1\r\n\r\n print_digit(check_digit)\r\n print(\"|\")",
"def main(options):\n home = Address(options.name,\n options.address,\n options.city,\n options.state,\n options.zip_code)",
"def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2",
"def city_state_zip(**kwargs):\r\n result = \"{city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n # RLID for some reason has two spaces between state & ZIP.\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n return result",
"def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue",
"def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }",
"def main() -> None:\n\n airports = {}\n some_info = {'item1': 1,\n 'item2': 2,\n }\n\n # adding items\n airports['YYZ'] = \"Toronto Pearson\"\n airports['YOW'] = \"Ottawa Canada\"\n airports['DUB'] = \"Dublin Ireland\"\n airports['LHR'] = \"London Heathrow\"\n\n # input & process\n print(\"All the airports:\")\n for key, value in airports.items():\n print(f\"The airport code is {key} for {value}.\")\n print(\"\")\n\n airport_name = input(\"Type in an airport code: \")\n if airport_name in airports:\n print(f\"The name of the airport you chose is {airports[airport_name]}.\")\n else:\n print(\"That airport is not in the airport's dictionary.\")\n\n print(\"\\nDone.\")",
"def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county",
"def get_info_on_postalcode(_, postalcode):\n fourpp = int(postalcode[0:4])\n chars = postalcode[4:6]\n streets = get_streets(fourpp, chars)\n if streets:\n street = streets[0]\n town = street.postcode.city.get_official_name()\n address = street.street\n data = {'found': True, 'address': address, 'town': town}\n else:\n data = {'found': False}\n j = json.dumps(data)\n return HttpResponse(j, content_type='application/json')",
"def GeoLocZip(zip_code, cntry):\r\n nb_error = 0\r\n #Try connection with OSM server\r\n while(nb_error < 100):\r\n try :\r\n #connection succeed\r\n time.sleep(1)\r\n g = geocoder.osm(str(zip_code)+' '+str(cntry))\r\n break\r\n except:\r\n #connection failed\r\n #try again\r\n nb_error += 1\r\n print(\"error req - nb_error : \"+str(nb_error))\r\n continue\r\n #g.osm['x'] = longitude\r\n #g.osm['y'] = latitude\r\n return g.osm['x'], g.osm['y']",
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)",
"def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None",
"def search_zipcode(driver, zipcode, links_file, min_page):\n time.sleep(LOADING_TIME)\n driver = fill_forms(driver, zipcode)\n\n # If a minimum page number was inputted, go to that url directly\n if min_page:\n url = driver.current_url\n page_url = url.split('&ns=1')[0] + '&start=' + str((min_page - 1) * 10)\n driver.get(page_url)\n\n source = driver.page_source\n soup = BeautifulSoup(source, \"html.parser\")\n \n # If it hit a captcha, stop everything\n if check_captcha(soup):\n sys.exit('Hit Captcha at zipcode ' + zipcode)\n\n # If it didn't find anything for that zip code, skip it\n if not soup.find(text = \"Sorry, but we didn't understand the location you entered.\"):\n # Go to each business link on search page\n driver = go_through_businesses(driver, soup, links_file, zipcode)\n\n # Go to next page of businesses in search, if there is a next page\n while True:\n try:\n # next_tag = driver.find_element_by_xpath(\"//a[@class='page-option prev-next next']\")\n next_tag = driver.find_element_by_xpath(\"//a[@class='u-decoration-none next pagination-links_anchor']\")\n # If there's no next button, you're done\n except:\n break\n else:\n driver.execute_script(\"return arguments[0].scrollIntoView();\", next_tag)\n time.sleep(LOADING_TIME)\n\n next_tag.click()\n time.sleep(LOADING_TIME)\n source = driver.page_source\n soup = BeautifulSoup(source, \"html.parser\")\n driver = go_through_businesses(driver, soup, links_file, zipcode)\n\n return driver",
"def bot_start_up(city: 'str', postal_code: 'str', province: 'str'):\n # Check that the parameters of the correct data type\n assert type(city) is str, \"City be a string\"\n assert type(city) is str, \"Postal code needs to be a string\"\n assert type(city) is str, \"Province code needs to be a string\"\n\n # Using Chrome to access web\n driver = webdriver.Chrome()\n # Open the website\n driver.get('https://www.dominos.ca/en/pages/order/#!/locations/search/')\n time.sleep(TIME_DELAY)\n\n # click on the carryout button\n element = driver.find_element_by_xpath(\"//*[@id='Service_Type_Carryout']\")\n driver.execute_script(\"arguments[0].click();\", element)\n\n # Select on your closest dominos\n dom_select(driver, city, postal_code, province)\n\n # Get the coupons\n coupon_gathering(driver)",
"def ask(self):\n self.term = str(input(\"What are you looking for? (Coffee, Restaurants, Museums, Bars) \"))\n if self.term.lower() == 'quit':\n sys.exit()\n self.destination = str(input(\"Where are you looking to go? (Neighborhood, City or City, State) \"))\n if self.destination.lower() == 'quit':\n sys.exit()\n \n \n #Request/JSON\n self.request = self.session.get(\"http://api.yelp.com/v2/search\", params={'term': self.term,'location': self.destination})\n self.request = self.request.json()\n \n #Dataframing\n self.menu = json_normalize(self.request['businesses'])\n self.menu.index = list(range(1, 21))\n self.menu = self.menu[['name', 'categories', 'location.address', 'location.city', 'location.coordinate.latitude', \\\n 'location.coordinate.longitude', 'review_count', 'rating', 'snippet_text']]\\\n .sort_values(['rating'], ascending=False).sort_index()",
"def get_weather_report():\n try:\n lat, long = None, None\n option = input(\"Get weather report of City \\n1.Select from database \\n2.Enter manually\\n\")\n if option == '1':\n lat, long = get_by_city()\n elif option == '2':\n lat, long = get_by_altitute()\n print(\"~~~~~~~~~~~~~~~~~~~~~~~Getting the weather report for the city~~~~~~~~~~~~~~~~~~~~\")\n response = requests.get(BASE_URL.format(lat, long, API_KEYS), verify=True)\n print(json.dumps(json.loads(response.text), indent=4))\n except ValueError:\n print(\"Select correct option.\")",
"def find_zip_codes(self, zip_code):\n zip_code = str(zip_code).strip()\n cursor = self.households.find({\"addresses.zip_code\":zip_code})\n results = [Household.from_dict(dct) for dct in cursor]\n\n cursor = self.businesses.find({\"address.zip_code\":zip_code})\n results += [Business.from_dict(dct) for dct in cursor]\n\n return results",
"def input_postal_code(self, postal_code):\n self.send_keys_to_element(self.postalcode_textbox_selector, postal_code)",
"def select_location(locations):\n\n # list locations with a number: 1. address1, 2. address2,...\n # and show those to the user\n loc_list = [str(num + 1) + ') ' + loc for num, loc in enumerate(locations['address'])]\n for loc in loc_list:\n print(loc)\n\n # select the location\n streets_numbers = [str(ii) for ii in range(1,27)]\n street = 'a'\n while street not in streets_numbers:\n street = input('Mitä katua haluat tarkastella? Anna numero 1 - 26. ')\n\n\n # return the selected street when index = selection - 1\n return int(street)-1",
"def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full",
"def main():\n CheckInternet = internet_on()\n if (CheckInternet == True):\n pass\n else:\n print(\"Cannot connect to internet, Check your connection!\")\n exit() \n CityByLocation = GetCity()\n print(\"Hello, The weather in your city \" + CityByLocation + \" is:\\n\")\n Weather = GetWeatherByLocation()\n PrintWeather(Weather)\n print(\"To get weather in your any other city, enter city name and press ENTER, to continue just press ENTER\")\n GetCityByUser()\n return 1",
"def GetCityByUser():\n City = str(input())\n if (City == \"\"):\n exit()\n Weather = GetWeatherByCity(City)\n print(\"The weather in \" + City + \" is:\\n\")\n PrintWeather(Weather) \n return 1"
]
| [
"0.72115284",
"0.6591404",
"0.64162874",
"0.6266131",
"0.61080503",
"0.59263486",
"0.5911287",
"0.5887681",
"0.58561957",
"0.58087796",
"0.5789213",
"0.5735703",
"0.5721184",
"0.57066685",
"0.5688648",
"0.5640758",
"0.5616105",
"0.5614457",
"0.5599004",
"0.55722016",
"0.55711716",
"0.55289584",
"0.55243796",
"0.5483232",
"0.5479713",
"0.53686696",
"0.53669524",
"0.5334057",
"0.5332076",
"0.5325309"
]
| 0.7830859 | 0 |
Assert that source_files are installed in install_path and selected source files are obfuscated. | def assert_source_files_are_installed_and_obfuscated(install_path, source_files,
source_files_without_obfuscate_path=None):
assert os.path.isdir(install_path), "%s does not exist" % install_path
for source_file, op, version, check_obfuscation in source_files:
file_path = os.path.join(install_path, source_file)
assert os.path.isfile(file_path), "%s file not found in %s" % (source_file, install_path)
properties = get_file_properties(file_path)
product_version = properties["StringFileInfo"].get('ProductVersion', None) if properties["StringFileInfo"] is not None else None
assert compare_versions_str(product_version, op, version, default=True), \
"%s ProductVersion %s is not %s %s" % (file_path, product_version, op, version)
if check_obfuscation:
assert_not_equal_hash(file_path, source_files_without_obfuscate_path + source_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_idea_missing_sources(self):\n self._idea_test(['testprojects/src/java/org/pantsbuild/testproject/missing_sources'])",
"def assert_assembly_files_are_installed(assembly_source_files, microsoft_assembly_source_files):\n for assembly_file in assembly_source_files:\n assert is_file_in_subdirectory(\"%s.dll\"% assembly_file, WINDOWS_ASSEMBLY_GAC_MSIL_PATH + assembly_file), \\\n \"%s file not found in directory %s\" % (assembly_file, WINDOWS_ASSEMBLY_GAC_MSIL_PATH)\n\n for assembly_file in microsoft_assembly_source_files:\n assert is_file_in_subdirectory(\"%s.dll\" % assembly_file, WINDOWS_DOTNET_ASSEMBLY_GAC_MSIL_PATH + assembly_file), \\\n \"%s file not found in directory %s\" % (assembly_file, WINDOWS_DOTNET_ASSEMBLY_GAC_MSIL_PATH)",
"def verify_inputs(self):\n if self.has_source():\n raise Exception(\"Installation from source is only available for \"\n \"`virtualenv` manager\")\n if self.has_extras():\n raise Exception(\"Installation of extras only possible for \"\n \"`virtualenv` manager\")",
"def _validate_sources(generated_sources, original_sources):\n\n generated_sources = list(set(generated_sources))\n original_sources = list(set(original_sources))\n not_existent_source = []\n for source in original_sources:\n if source not in generated_sources:\n not_existent_source.append(source)\n\n if not_existent_source:\n print('WARN: Some sources did exist in generated file')\n print(not_existent_source)\n return False\n\n return True",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def test_verify_changed_source_file(self):\n # This test was made to pass in fixing Bug #1354880\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])",
"def test_check_source_3(self):\n self.eval_flags[\"check_host_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 1)",
"def test_check_source_2(self):\n self.eval_flags[\"check_id_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def _test_local_install():\n if os.getcwd() == os.sep.join(\n os.path.abspath(__file__).split(os.sep)[:-2]):\n import warnings\n warnings.warn('Running the tests from the install directory may '\n 'trigger some failures')",
"def test_verify(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"def test_verify_compare_data_changed_source_file(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Test verify for edited file fails with --compare-data\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n self.assertEqual(e.exit_status, 1, str(e))\n else:\n self.fail('Expected CmdError not thrown')",
"def test_check_source_4(self):\n self.src1.organism = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)",
"def test_check_source_1(self):\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 4)",
"def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))",
"def test_source_package_exists(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n self.assertEqual(response.status_code, status.OK)",
"def test_check_source_5(self):\n self.src1.host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def check_before_run(self, required_files):\n if isinstance(required_files, str):\n required_files = [required_files]\n\n for fpath in required_files:\n if not osp.exists(fpath):\n raise RuntimeError('\"{}\" is not found'.format(fpath))",
"def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()",
"def test_ensure_copyright():\n issues = []\n regex = re.compile(r\"# Copyright \\d{4}(-\\d{4})? Canonical Ltd.$\")\n for filepath in get_python_filepaths():\n if os.stat(filepath).st_size == 0:\n continue\n\n with open(filepath, \"rt\", encoding=\"utf8\") as fh:\n for line in itertools.islice(fh, 5):\n if regex.match(line):\n break\n else:\n issues.append(filepath)\n if issues:\n msg = \"Please add copyright headers to the following files:\\n\" + \"\\n\".join(issues)\n pytest.fail(msg, pytrace=False)",
"def _checkSourcesAvailability(sourcesList):\n sources = sourcesList.split(\",\")\n for source in sources:\n try:\n importlib.import_module(\".sources.\"\n + source\n + \".main\",\n __package__).WesenSource\n except ImportError as e:\n print(e)\n print(\"The source code for one of your AIs could not be loaded: \",\n source)\n sys.exit()",
"def testPreProcess(self):\n self.grr_hunt_file_collector.PreProcess()\n self.assertEqual(\n self.grr_hunt_file_collector.file_path_list,\n ['/etc/passwd', '/etc/shadow', '/etc/hosts'])",
"def test_copy_required_include_and_exclude(self):\n include = ['yara/*', '*_malware_*']\n exclude = ['*mobile*', 'yara/?.yara']\n\n self.assertTrue(clone_rules._copy_required('yara/packed.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('base_malware_index.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('yara/mac_malware.yar', include, exclude))\n\n self.assertFalse(clone_rules._copy_required('not_included.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/mobile_malware.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/A.yara', include, exclude))",
"def test_use_pep517(shared_data: TestData, source: str, expected: bool) -> None:\n src = shared_data.src.joinpath(source)\n req = InstallRequirement(None, None)\n req.source_dir = os.fspath(src) # make req believe it has been unpacked\n req.load_pyproject_toml()\n assert req.use_pep517 is expected",
"def test_check_source_6(self):\n self.src1.lab_host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)",
"def check_before_run(self, required_files):\n if isinstance(required_files, str):\n required_files = [required_files]\n\n for fpath in required_files:\n if not os.path.exists(fpath):\n raise RuntimeError('\"{}\" is not found'.format(fpath))",
"def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"write_qiime_train_db.py\", get_files)",
"def test_core_files_hw(self):\n self.test_core_files()"
]
| [
"0.6273379",
"0.610558",
"0.5990108",
"0.5975442",
"0.59727985",
"0.5918797",
"0.5813625",
"0.57438725",
"0.5667816",
"0.56490386",
"0.5627883",
"0.55917925",
"0.5557579",
"0.55402726",
"0.55304146",
"0.55293643",
"0.5524229",
"0.5511834",
"0.5504771",
"0.55013037",
"0.5497226",
"0.5491665",
"0.5469851",
"0.54526174",
"0.5451718",
"0.5424866",
"0.53930986",
"0.538377",
"0.5380911",
"0.5371163"
]
| 0.82752395 | 0 |
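The document function in the record above calls three helpers the record does not include (`get_file_properties`, `compare_versions_str`, `assert_not_equal_hash`), so it is not runnable as quoted. A minimal sketch of what the hash check could look like, assuming obfuscation is expected to change the file bytes — an illustration, not the original project's helper:

```python
import hashlib

def _sha256(path):
    # Stream the file in chunks so large binaries are not read at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

def assert_not_equal_hash(installed_path, unobfuscated_path):
    # If obfuscation ran, the installed binary must differ byte-for-byte
    # from the unobfuscated source copy.
    assert _sha256(installed_path) != _sha256(unobfuscated_path), \
        "%s matches %s exactly; file does not appear obfuscated" % (
            installed_path, unobfuscated_path)
```

The `StringFileInfo`/`ProductVersion` lookup in the record suggests Windows version resources (e.g. via pywin32's `win32api.GetFileVersionInfo`), but that helper is likewise not shown.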
Assert that files are installed in GAC_MSIL. | def assert_assembly_files_are_installed(assembly_source_files, microsoft_assembly_source_files):
for assembly_file in assembly_source_files:
assert is_file_in_subdirectory("%s.dll"% assembly_file, WINDOWS_ASSEMBLY_GAC_MSIL_PATH + assembly_file), \
"%s file not found in directory %s" % (assembly_file, WINDOWS_ASSEMBLY_GAC_MSIL_PATH)
for assembly_file in microsoft_assembly_source_files:
assert is_file_in_subdirectory("%s.dll" % assembly_file, WINDOWS_DOTNET_ASSEMBLY_GAC_MSIL_PATH + assembly_file), \
"%s file not found in directory %s" % (assembly_file, WINDOWS_DOTNET_ASSEMBLY_GAC_MSIL_PATH) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def test_network_lic_file_present(self):\n\n str_matlab_bin_path = self.host.check_output(\"readlink -f $(which matlab)\")\n matlab_dir = Path(str_matlab_bin_path).parents[1]\n network_lic_path = matlab_dir / \"licenses\" / \"network.lic\"\n self.assertTrue(self.host.file(str(network_lic_path)).exists)",
"def test_6_1_8_etc_gshadow_isfile(host):\n assert host.file(ETC_GSHADOW).is_file",
"def test_pkglibdir(self):\n self.chck_triple('pkglibdir')",
"def test_managed_install(visualstudio, tmp_path):\n assert not visualstudio.managed_install",
"def test_72_packages(host, pkg):\n assert host.package(pkg).is_installed",
"def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency",
"def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])",
"def sanity_check_step(self):\n\n binprefix = \"bin/intel64\"\n libprefix = \"lib/intel64/lib\"\n if LooseVersion(self.version) >= LooseVersion(\"2011\"):\n if LooseVersion(self.version) <= LooseVersion(\"2011.3.174\"):\n binprefix = \"bin\"\n elif LooseVersion(self.version) >= LooseVersion(\"2013_sp1\"):\n binprefix = \"bin\"\n libprefix = \"lib/intel64/lib\"\n else:\n libprefix = \"compiler/lib/intel64/lib\"\n\n custom_paths = {\n 'files': [\"%s/%s\" % (binprefix, x) for x in [\"icc\", \"icpc\", \"idb\"]] +\n [\"%s%s\" % (libprefix, x) for x in [\"iomp5.a\", \"iomp5.so\"]],\n 'dirs': [],\n }\n\n super(EB_icc, self).sanity_check_step(custom_paths=custom_paths)",
"def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0",
"def test_6_1_8_etc_gshadow_exists(host):\n assert host.file(ETC_GSHADOW).exists",
"def test_install_packages():\n\n\tassert packaging.install_packages(pkgs) == None",
"def test_load(self):\n detected_path = GnuPG.path()\n\n self.assertIsNotNone(detected_path)\n self.assertIn(detected_path, ['/usr/bin/gpg2', '/usr/bin/gpg'])",
"def test_molecool_imported():\n assert \"molecool\" in sys.modules",
"def test_imports():\n assert False",
"def test_libdir(self):\n self.chck_triple('libdir')",
"def test_rlmm_imported():\n assert \"rlmm\" in sys.modules",
"def test_powerlaw_package_available(self):\n\t\timport importlib\n\n\t\ttry:\n\t\t\timportlib.import_module('powerlaw')\n\t\texcept ImportError:\n\t\t\tself.fail(\"Failed to import powerlaw package\")\n\n\t\treturn",
"def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules",
"def test_check_module(self) -> None:\n check_module(\"os\")",
"def test_scripts_are_installed(self):\n fits_file = os.path.join(self.datadir, 'monol_testA.evt')\n command = 'HENreadfile {0}'.format(fits_file)\n sp.check_call(command.split())",
"def check_install(self, gppkg_filename):\n cmd = \"gppkg -q %s\" % gppkg_filename\n results = run_command(cmd)\n test_str = ''.join(gppkg_filename.split('-')[:1]) + \" is installed\"\n is_installed = test_str in results\n return is_installed and CheckFile(os.path.join(ARCHIVE_PATH, gppkg_filename)).run()",
"def test_archivename(self):\n\n for testfile in ['6mbzipattachment.eml', '6mbrarattachment.eml']:\n try:\n # copy file rules\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='virus', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = '[email protected]'\n conffile = self.tempdir + \"/%s-archivenames.conf\" % user\n open(conffile, 'w').write(\n \"deny largefile user does not like the largefile within a zip\\ndeny 6mbfile user does not like the largefile within a zip\")\n self.rulescache._loadrules()\n suspect = Suspect(\n '[email protected]', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'archive containing blocked filename was not blocked')\n finally:\n tmpfile.close()\n os.remove(conffile)",
"def testInitialization(self):\n self.assertEqual(\n self.grr_hunt_file_collector.file_path_list,\n ['/etc/passwd', '/etc/shadow']\n )",
"def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules",
"def test_azurecli_package_installed(host):\n assert host.package(PACKAGE).is_installed",
"def test_ensure_copyright():\n issues = []\n regex = re.compile(r\"# Copyright \\d{4}(-\\d{4})? Canonical Ltd.$\")\n for filepath in get_python_filepaths():\n if os.stat(filepath).st_size == 0:\n continue\n\n with open(filepath, \"rt\", encoding=\"utf8\") as fh:\n for line in itertools.islice(fh, 5):\n if regex.match(line):\n break\n else:\n issues.append(filepath)\n if issues:\n msg = \"Please add copyright headers to the following files:\\n\" + \"\\n\".join(issues)\n pytest.fail(msg, pytrace=False)",
"def test_load_fails_no_shell(install_mockery, mock_fetch, mock_archive, mock_packages):\n install(\"mpileaks\")\n\n out = load(\"mpileaks\", fail_on_error=False)\n assert \"To set up shell support\" in out",
"def test_importable():\n root_path = os.path.dirname(MY_DIRECTORY)\n\n for version in versioning.get_all_versions():\n v = version.label.replace(\".\", \"_\")\n path = os.path.join(root_path, v)\n module_names = [m[:-3] for m in os.listdir(path) if m.endswith(\".py\")]\n for name in module_names:\n m = importlib.import_module(\".\".join([\"kuber\", v, name]))\n assert m is not None, f\"Expected kuber.{v}.{m} to be importable.\""
]
| [
"0.5879728",
"0.5766973",
"0.5738156",
"0.5738147",
"0.569487",
"0.56844616",
"0.5623787",
"0.5610653",
"0.5580728",
"0.5538616",
"0.5493665",
"0.54421926",
"0.53928185",
"0.53919005",
"0.53829306",
"0.5343697",
"0.53233",
"0.53141606",
"0.5281856",
"0.5258395",
"0.5239167",
"0.52342653",
"0.52220297",
"0.5209219",
"0.52018523",
"0.5201486",
"0.5197044",
"0.51947105",
"0.5193833",
"0.517716"
]
| 0.66300887 | 0 |
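The GAC check in the record above relies on an `is_file_in_subdirectory` helper and two path constants it does not define (on Windows, `GAC_MSIL` folders conventionally sit under `C:\Windows\assembly\` and `C:\Windows\Microsoft.NET\assembly\`). One plausible, purely illustrative shape for that helper:

```python
import os

def is_file_in_subdirectory(filename, directory):
    # Recursively walk `directory` and report whether any level
    # contains a file with exactly this name.
    for _root, _dirs, files in os.walk(directory):
        if filename in files:
            return True
    return False
```

A recursive walk rather than a flat `os.listdir` fits here because GAC_MSIL nests each assembly under version/culture/public-key subfolders.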
Simply apply the value_counts function to every column in a dataframe | def pandas_value_counts(df):
return df.apply(pd.value_counts).fillna(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obj_value_counts(df):\n df_obj = obj_df(df)\n for col in df_obj.columns:\n print(df_obj[col].value_counts())\n print('-'*100)",
"def count(df):\r\n\r\n\tdf_count_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_count_dict[col] = df[col].count()\r\n\r\n\tdf_count = pd.DataFrame(df_count_dict, index=['Count'])\r\n\r\n\treturn df_count",
"def value_frequencies(df, column):\n\n df_val = pd.DataFrame(df[column].value_counts())\n df_val = df_val.rename(columns={column: \"Count\"})\n df_val[\"Frequency\"] = df_val[\"Count\"] / len(df) * 100\n return df_val",
"def count_entries(df, *args):\n \n #Initialize an empty dictionary: cols_count\n cols_count = {}\n \n # Iterate over column names in args\n for col_name in args:\n \n # Extract column from DataFrame: col\n col = df[col_name]\n \n # Iterate over the column in DataFrame\n for entry in col:\n \n # If entry is in cols_count, add 1\n if entry in cols_count.keys():\n cols_count[entry] += 1\n \n # Else add the entry to cols_count, set the value to 1\n else:\n cols_count[entry] = 1\n\n # Return the cols_count dictionary\n return cols_count",
"def get_value_counts(X, columns, cate_cap=30):\n counts = {}\n for col in columns:\n temp = dict(X[col].value_counts())\n temp['NaN'] = X[col].isnull().sum()\n if len(temp) > cate_cap:\n counts[col] = 'There are more than %d categories. Please check this column.' % cate_cap\n else:\n counts[col] = temp\n return counts",
"def category_freq(dataframe):\n total_rows = len(dataframe)\n for col in dataframe:\n # don't include the Name category in our results\n if dataframe[col].name == 'Name':\n continue\n num_categories = len(dataframe.groupby(col))\n print(\n \"---- %s TOTAL CATEGORIES FOR %s ----\"\n % (num_categories, dataframe[col].name))\n # generate series to list occurrences of each column value\n col_vals = dataframe[col].value_counts()\n # store series as DataFrame\n result_df = col_vals.to_frame()\n # generate series to display percentages\n as_percent = 100 * col_vals / float(total_rows)\n # append percentages column to DataFrame\n result_df['percentage'] = as_percent\n print(result_df)",
"def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()",
"def get_counts(df,col_name):\n return df.groupBy(col_name).count().show()",
"def get_value_counts_pd(X, columns, cate_cap=30):\n count_dict = get_value_counts(X, columns=columns, cate_cap=cate_cap)\n idx_tuple = []\n value_counts = []\n for col in count_dict.keys():\n if type(count_dict[col]) == str:\n idx_tuple += [(col, col)]\n value_counts += ['Too many categories']\n else: \n temp = [[col]*len(count_dict[col]), count_dict[col].keys()]\n idx_tuple += list(zip(*temp))\n value_counts += count_dict[col].values()\n multiidx = pd.MultiIndex.from_tuples(idx_tuple, names=['column', 'category'])\n counts_df = pd.DataFrame(value_counts, columns=['counts'], index=multiidx)\n return counts_df",
"def custom_numpy_count(df, weights=None):\n val = df.values\n un = np.unique(val.reshape(-1))\n if weights:\n pass\n r = {u: np.einsum('i, ij->j', weights, (val == u)) if weights is not None else np.einsum('ij->j', (val == u).astype(int)) for u in un}\n return pd.DataFrame(r).transpose()",
"def check_unique_value(df, colnames):\r\n mydict = {}\r\n for col in colnames:\r\n val_count = (df[col].value_counts(dropna=False)).to_dict()\r\n mydict[col] = val_count\r\n pprint(mydict)\r\n return",
"def count_vector(df:pd.DataFrame, column_name:str, y:list=None):\n vectorizer = CountVectorizer()\n # print(vectorizer.get_feature_names())\n ans = vectorizer.fit_transform(raw_documents=df[column_name], y=y)\n return ans",
"def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df",
"def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df",
"def popular_counts_column(column):\n popular_anything = column.mode()[0]\n counts_anything = column.value_counts()[popular_anything]\n \n return popular_anything, counts_anything",
"def tran_count(df, *args):\n # Compute the count\n df_res = DataFrame(\n df.groupby([*args]).size()\n ).reset_index()\n # Change column name\n col = list(df_res.columns)\n col[-1] = \"n\"\n df_res.columns = col\n\n return df_res",
"def get_unique_counts(df, colname):\n\treturn list(dict(df[colname].value_counts(ascending=False, dropna=False)).values())",
"def return_uniques_and_counts(df):\n keys, count = dict(), dict()\n keys = (\n df.iloc[:, :].sum(axis=1).apply(set).apply(sorted).to_dict()\n ) # adding all columns\n count = {k: len(v) for k, v in keys.items()}\n\n return keys, count",
"def same_num_of_unique_val(df):\n value_count =dict()\n for col in df.columns:\n value_count[col] = list(df[col].value_counts())\n similar_columns = [i for i in combinations(df.columns,2) if (value_count[i[0]]==value_count[i[1]] and i[0] != i[1])]\n if similar_columns != []:\n for (col1, col2) in similar_columns :\n printmd(str(\"* *\" + str(col1) +\"* and *\"+ str(col2)+ \"* have same number of values \"))\n a = pd.DataFrame(df[col1].value_counts()).reset_index()\n a.columns = [str('values_'+col1), 'count']\n b = pd.DataFrame(df[col2].value_counts()).reset_index()\n b.columns = [str('values_'+col2), 'count']\n to_display = a.merge(b, on = 'count')\n display(to_display[['count', str('values_'+col1), str('values_'+col2)]])\n\n else :\n printmd(\"* No columns have same number of unique values\")",
"def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None",
"def GetFreq(ds,colList=None,cutoff=100):\n if colList is None:\n colList=ds.nunique()\n colList=colList[(colList<=cutoff) & (colList>0)].index.tolist()\n else :\n colList=colList\n \n #d = pd.DataFrame((ds[colList].apply(pd.value_counts).T.stack())) # it doesn't gives nan values\n d = pd.DataFrame(ds[colList].apply(lambda x: x.value_counts(dropna=False)).T.stack())\n d.reset_index(inplace=True)\n d.rename(columns={\"level_0\":\"FeatureName\",\"level_1\":\"Levels\",0:\"Freq\"},inplace=True)\n d['Levels'] = d['Levels'].fillna(\"NA\") #replace nan to NA\n d['Prop'] = d['Freq']/ds.shape[0]\n \n return d",
"def count_vectorizer(self, df:pd.DataFrame, column_name:str, y:list=None):\n self._vectorizer = CountVectorizer()\n # print(vectorizer.get_feature_names())\n ans = self._vectorizer.fit_transform(raw_documents=df[column_name], y=y)\n return ans",
"def count_prdctr_freqs(results, column_out):\n \n from collections import Counter\n \n c = Counter()\n for itm in results: \n item = itm\n if(isnumber(item)): \n item = [item]\n \n c.update(Counter(item))\n \n df_out = pd.DataFrame(list(dict(c).items()), columns=[column_out, 'Frequency (%)'])\n df_out = df_out.sort_values(by=['Frequency (%)'], ascending=False)\n df_out['Frequency (%)'] = (df_out['Frequency (%)']/results.shape[0])*100.\n \n return df_out",
"def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)",
"def label_counts(rows):\n counts = rows.iloc[:, -1].value_counts()\n return counts",
"def unique_count(df):\r\n\r\n data = []\r\n\r\n for column in df.columns:\r\n data.append((column, df.select(column).distinct().count()))\r\n\r\n return spark.createDataFrame(data, ['column', 'count'])",
"def number_of_values(df,value=0,axis=0):\n \n return (df == value).astype(int).sum(axis=1-axis)",
"def count_instances(tbl, col2count, colcounted):\n counted_ser = tbl[col2count].value_counts()\n counted_df = pd.DataFrame(counted_ser, columns=[colcounted]).reset_index()\n counted_df.rename(columns={'index':col2count},inplace=True)\n tbl = tbl.merge(counted_df,on=col2count)\n return tbl",
"def freq_and_prop(\n series: Union[pd.Series, Iterable[Any]], **value_counts_kwargs: Mapping[Any, Any]\n) -> pd.DataFrame:\n if not isinstance(series, pd.Series):\n series = pd.Series(series)\n item_name = series.name\n res = pd.concat(\n [\n series.value_counts(\n **value_counts_kwargs\n # since 'count' is a method, better to use\n # 'freq' as the column name\n ).rename(\"freq\"),\n series.value_counts(normalize=True, **value_counts_kwargs).rename(\"prop\"),\n ],\n axis=1,\n )\n res.index.rename(item_name, inplace=True)\n return res",
"def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)"
]
| [
"0.80173266",
"0.7216902",
"0.7100851",
"0.6849541",
"0.68442845",
"0.67460424",
"0.674539",
"0.67321604",
"0.66469705",
"0.65439206",
"0.6435983",
"0.6374298",
"0.63453734",
"0.63453734",
"0.63251334",
"0.6316298",
"0.6299988",
"0.62826157",
"0.62393385",
"0.62360716",
"0.62215275",
"0.62157935",
"0.62145555",
"0.6180407",
"0.6166407",
"0.6159553",
"0.6135817",
"0.6134515",
"0.6124301",
"0.6098094"
]
| 0.7974501 | 1 |
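The one-liner in the record above is easy to exercise; it is restated here with a toy frame so the behaviour is visible (note that recent pandas releases deprecate the top-level `pd.value_counts` in favour of `Series.value_counts`):

```python
import pandas as pd

def pandas_value_counts(df):
    # One value_counts per column, aligned on a shared index of all
    # observed values; values absent from a column count as 0.
    return df.apply(pd.value_counts).fillna(0)

df = pd.DataFrame({"a": ["x", "y", "x"], "b": ["y", "y", "z"]})
print(pandas_value_counts(df))
#      a    b
# x  2.0  0.0
# y  1.0  2.0
# z  0.0  1.0
# (row order can vary by pandas version)
```

An equivalent spelling that avoids the deprecated top-level function is `df.apply(lambda s: s.value_counts()).fillna(0)`.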
Generates a path based on url. | def generate_path(url, output_path='', site_root=''):
path = [] if not site_root else [site_root.replace('/', '')]
for item in url.split('/'):
if item:
path.append(item)
if '.' not in path[-1] and path[-1].split('.'):
path.append('index.html')
return os.path.join(output_path, *path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))",
"def path_for(self, url, pagename):\n parts = pagename.split('/')[:-1]\n if len(parts) == 0:\n return url[1:]\n return os.path.relpath(url, '/%s' % '/'.join(parts))",
"def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)",
"def get_path(url):\n # Different resources like customers, orders as each of them have files\n # names like 1.xml, 2.xml for individual records.\n # If the last part of URL is an integer, return last two parts of URL\n first, second_last, last = url.rsplit('/', 2)\n\n if last.isdigit():\n return os.path.join(second_last, last + '.xml')\n else:\n # If the last part is not an integer, return last part itself\n return last + '.xml'",
"def _url(self, path):\n \n return self.url + path",
"def _generate_url(self, **kwargs):\n path = self.url_path.format(**kwargs)\n return self.poolbot.generate_url(path)",
"def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")",
"def urlpath(self, url):\n\t\t# remove schema + hostname\n\t\turl = re.sub('^[^:]*://[^/]+', '/', url)\n\n\t\treturn self.canonicalize(url)",
"def generate_full_url(base_url, lineage, segment):\n params = \"/\".join([lineage, segment])\n return urljoin(base_url, params)",
"def _get_dir_url(endpoint, path, **kwargs):\n if not path:\n return url_for(endpoint)\n else:\n #if self._on_windows:\n # path = path.replace('\\\\', '/')\n\n kwargs['path'] = path\n\n return url_for(endpoint, **kwargs)",
"def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def _make_url(self):\n ...",
"def GetPathFromUrl(url):\n return __ParseUrl(url)[2]",
"def url(self, path: str) -> str:\n return url_util.join(self.base_url, path)",
"def _create_url(self, event_id: str, path: str) -> str | None:\n if path == \"thumbnail.jpg\":\n return str(URL(self._host) / f\"api/events/{event_id}/thumbnail.jpg\")\n\n if path == \"snapshot.jpg\":\n return str(URL(self._host) / f\"api/events/{event_id}/snapshot.jpg\")\n\n camera = path.split(\"/\")[0]\n if path.endswith(\"clip.mp4\"):\n return str(URL(self._host) / f\"clips/{camera}-{event_id}.mp4\")",
"def load_path_url():\n web.ctx.path_url = web.ctx.home + web.ctx.path",
"def create_url(path, controller_ip=DNAC):\n print(\"3\")\n return \"https://%s:%s/api/v1/%s\" % (controller_ip, DNAC_PORT, path)",
"def make_path(request):\n path = get_path(request.param)\n yield path\n path = None",
"def gen_query_url(self, url, function, format=None, method=None, get_args=None):\n function = self.namespace_map[function]\n return '%s/%s' % (url, function)",
"def compile_route_to_url(self):\n\n if 'http' in self.redirect_url:\n return self.redirect_url\n\n # Split the url into a list\n split_url = self.redirect_url.split('/')\n\n # Start beginning of the new compiled url\n compiled_url = '/'\n\n # Iterate over the list\n for url in split_url:\n\n # if the url contains a parameter variable like @id:int\n if '@' in url:\n url = url.replace('@', '').replace(\n ':int', '').replace(':string', '')\n compiled_url += str(self.param(url)) + '/'\n else:\n compiled_url += url + '/'\n\n # The loop isn't perfect and may have an unwanted trailing slash\n if compiled_url.endswith('/') and not self.redirect_url.endswith('/'):\n compiled_url = compiled_url[:-1]\n\n # The loop isn't perfect and may have 2 slashes next to eachother\n if '//' in compiled_url:\n compiled_url = compiled_url.replace('//', '/')\n\n return compiled_url",
"def url_for(self, *args, **kwargs):\n return yarl.URL(self.url(parts=kwargs))",
"def generate_url(domainname = None):\n path_length = random.choice([1,2,3,4,5])\n path = ''\n for i in range(path_length):\n path = path + '/' + ''.join(generate_string(5, valid_domain_name_chars))\n if domainname:\n return 'http://www.'+domainname+path\n else: \n return 'http://www.'+generate_domainname()+path",
"def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')",
"def generate_url(self, version: str, plat: Platform) -> str:\n platform = self.url_platform_mapping.get(plat.value, \"\")\n url = self.url_template.format(version=version, platform=platform)\n extension = \"gz\" if plat.is_macos else \"xz\"\n return f\"{url}.{extension}\"",
"def get_url(domain, path):\n\n return f\"https://{domain}.freshservice.com/api/v2/{path}\"",
"def place(self, url):\n url = [\n '/scrappings',\n get_canonical_url(url),\n get_hash_path(url)\n ]\n\n return \"/\".join(url)",
"def construct_path(id_val):\n id_val = str(id_val)\n path = id_val[:3] + \"/\" + id_val[3:6] + \"/\" + id_val[6:9] + \"/\"\n path += id_val\n return path",
"def _generate_request_url(endpoint: str, request_path: str) -> str:\n if endpoint is None or request_path is None:\n raise ValueError(\"endpoint and request_path are required.\")\n if urllib_parse.urlparse(request_path).path != request_path:\n raise ValueError('Incorrect format for request_path: {request_path}'.format(**{'request_path': request_path}))\n return endpoint + request_path"
]
| [
"0.6959691",
"0.6937469",
"0.676501",
"0.67328423",
"0.6726451",
"0.6627678",
"0.65264916",
"0.6452767",
"0.64111346",
"0.640524",
"0.6360018",
"0.63565695",
"0.63565695",
"0.6339681",
"0.62146103",
"0.6203177",
"0.61992043",
"0.61939573",
"0.615887",
"0.61470675",
"0.61186945",
"0.6113632",
"0.6100465",
"0.6099195",
"0.6091186",
"0.6074816",
"0.60725754",
"0.60708034",
"0.6062796",
"0.60421336"
]
| 0.7457683 | 0 |
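Restating the path generator from the record above makes two quirks easy to see: the second half of its final condition (`path[-1].split('.')`) is always truthy, so only the `'.' not in path[-1]` test matters, and a URL with no segments (e.g. `'/'` with no `site_root`) would evaluate `path[-1]` on an empty list and raise `IndexError`. A runnable sketch using POSIX-style paths:

```python
import os

def generate_path(url, output_path='', site_root=''):
    path = [] if not site_root else [site_root.replace('/', '')]
    for item in url.split('/'):
        if item:
            path.append(item)
    # An extension-less final segment is treated as a directory page.
    if '.' not in path[-1]:  # assumes at least one segment was collected
        path.append('index.html')
    return os.path.join(output_path, *path)

print(generate_path('/blog/post-1/', output_path='build'))  # build/blog/post-1/index.html
print(generate_path('/about.html', output_path='build'))    # build/about.html
```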
initialize a new contest with country name and db cursor. | def __init__(self, country, cursor):
self.country = country
self.cursor = cursor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_data_for_countries(db_data):\n countries = db_data.get('country')\n if countries is not None:\n rows = countries.get('data')\n for row in rows:\n country = Country(name=row)\n db_add_and_commit(db, country)",
"def __init__(self, db, verbose, cand_id=None, visit_label=None,\n center_id=None, project_id=None, cohort_id=None):\n self.db = db\n self.verbose = verbose\n\n self.proj_cohort_rel_db_obj = ProjectCohortRel(db, verbose)\n self.session_db_obj = SessionDB(db, verbose)\n self.site_db_obj = Site(db, verbose)\n\n self.cand_id = str(cand_id)\n self.visit_label = visit_label\n self.center_id = center_id\n self.project_id = project_id\n self.cohort_id = cohort_id\n\n self.proj_cohort_rel_info_dict = dict()\n self.session_info_dict = dict()\n self.session_id = None",
"def create(self):\n c = Contest.objects.create(name=self.name,\n site_sport=self.site_sport,\n prize_structure=self.prize_structure,\n start=self.start,\n end=self.end,\n skill_level=self.skill_level)\n logger.info('Contest created: %s' % c)\n return c",
"def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)",
"def __init__(self):\n self._db = db\n # Connect to DB\n self._db.connect()\n # Create tables\n self._db.create_tables([Teachers, Parents, Tutors, Students, Homework, Groups, StudentsGroups, Courses])\n # Create filling entries\n self.__create_dummies()\n self._db.close()",
"def __init__(self, city, state, country):\n self.city = city\n self.state = state\n self.country = country",
"def __init__(self):\r\n date_time('Connecting to local database ...')\r\n\r\n self.conn = sqlite3.connect(DATABASE_PATH)\r\n self.cursor = self.conn.cursor()\r\n\r\n # Set up database\r\n self.cursor.execute('PRAGMA synchronous = OFF')\r\n self.cursor.execute('PRAGMA journal_mode = OFF')\r\n self.cursor.execute('PRAGMA locking_mode = EXCLUSIVE')\r\n self.cursor.execute('PRAGMA count_changes = FALSE')\r\n\r\n self.cursor.execute('CREATE TABLE IF NOT EXISTS citations (id INTEGER PRIMARY KEY, citation TEXT UNIQUE);')",
"def create_table_country(db, f):\n # connect to the database and create a cursor\n\n # drop the table if it already exists; re-create it\n\n # Populate the StatelessCountByRegion Table\n # Loop through each line in the file:\n # for line in f:\n # Write the body of this loop yourself. Handle the whitespace around\n # each line, split each line on SEP, and insert the relevant values\n # into the table.\n # Hint: line.strip().split(SEP) will return you a list.\n\n # don't forget to close the cursor, commit the changes and close\n # the connection",
"def __init__(self, name, supply_centers):\r\n self.countries = {}\r\n self.name = name\r\n self.supply_centers = supply_centers",
"def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db",
"def setUp(self):\n self.schema = {\n \"name\": \"Country\",\n \"id\": \"#Country\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"abbreviation\": {\"type\": \"string\"},\n \"languages\": {\"type\": [\"array\", \"null\"], \"items\": {\"type\": \"string\"}},\n },\n \"additionalProperties\": False,\n }\n\n # Connect to formal_test - hopefully it doesn't exist\n formal.connect(\"formal_test\")\n self.Country = formal.model_factory(self.schema)\n\n # Drop all the data in it\n self.Country.collection().delete_many({})\n\n # Create some defaults\n self.Country({\"name\": \"Sweden\", \"abbreviation\": \"SE\", \"languages\": [\"swedish\"]})\n self.Country(\n {\n \"name\": \"United States of America\",\n \"abbreviation\": \"US\",\n \"languages\": [\"english\"],\n }\n )",
"def setUp(self):\n Beneficiary.objects.create(id=1, lastname='Doe', lastname2='', middlename='', firstname='Jane', nativename='',\n nationality_country_iso_code='FRA', code='', date_of_birth='1970-07-01',\n country_of_birth_iso_code='FRA', gender='Male', address='42 Rue des fleurs',\n postal_code='75000', city='Paris', country_iso_code='FRA', msisdn='1123131413',\n email='[email protected]', id_type='PASSPORT', id_country_iso_code='',\n id_number='1123131413', occupation='Teacher', bank_accout_holder_name='',\n province_state='')\n self.client = Client()",
"def initialize():\n sql_db = SQLConnection()\n with SQLCursor(sql_db) as cur:\n cur.execute('SELECT position from govt_info')\n row = cur.fetchone()\n for pos in Government.positions:\n if row is None or len(row) != len(Government.positions):\n cur.execute('INSERT OR IGNORE INTO govt_info (position) VALUES (?);', (pos,))",
"def __init__(self):\r\n assert isfile(DBClass.db_name), \"Database doesn't exists!\"\r\n\r\n self.conn = self.create_connection()\r\n self.cursor = self.conn.cursor()",
"def initialize(self):\r\n state_name = self.state\r\n\r\n state_name = state_name.lower()\r\n\r\n response = requests.get(\"https://cdn-api.co-vin.in/api/v2/admin/location/states\") \r\n\r\n if response.ok:\r\n\r\n df = pd.DataFrame(json.loads(response.text)[\"states\"]) \r\n\r\n state = process.extractOne(state_name, df[\"state_name\"].tolist()) # fuzzy match to get best state match \r\n\r\n self.state_id = df.loc[df.state_name == state[0],[\"state_id\"]].values[0][0] \r\n self.load_districts()",
"def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))",
"def createCountryTable(conn):\n \n c = conn.cursor()\n \n c.execute(\"CREATE TABLE country (name TEXT, capital TEXT, population FLOAT)\") \n conn.commit()",
"def __init__(self):\n \n self._citydb = CityDB()\n self._depression = 6 # Set default depression in degrees",
"def test_CovidCase_creation(self):\n new_Covid = self.create_CovidCase()\n\n self.assertTrue(isinstance(new_Covid, CovidCase))\n self.assertEqual(new_Covid.country_id, \"TE\")",
"def __init__(self, *args, **kw):\n # kw['strIdent'] = DBCAT\n BaseDB.__init__(self, *args, **kw)\n # cache by project name as key and project Id as value\n self._gbl_projectid_cache = {}",
"def __init__(self, contest=None, contest_id=None):\n\n if contest_id is not None:\n self.contest = Contest.objects.get(pk=contest_id)\n elif contest is not None:\n self.contest = contest\n else:\n raise Exception('contest must not be None')\n\n dgm = DraftGroupManager()\n self.draft_group_players = dgm.get_players(self.contest.draft_group)\n\n # a map where the player id points to their own id if their game\n # has started, or to 0xffff if they havent started yet\n #\n # i can see a reason we would want to cache the get_starter_map result ....\n self.starter_map = self.get_starter_map(self.draft_group_players)\n\n # determine the size of a lineup in bytes\n rm = RosterManager(self.contest.site_sport)\n self.players_per_lineup = rm.get_roster_spots_count()\n\n self.entries = Entry.objects.filter(contest=self.contest)",
"def fill_cites(self):\n response = requests.get(\"https://restcountries.eu/rest/v2/all\")\n json_content = json.loads(response.text)\n i = 0\n for t in json_content:\n currency = t[\"currencies\"][0][\"code\"]\n pop = t[\"population\"]\n state_name = t[\"name\"]\n self.cities_from_api[t[\"capital\"].lower()] = [str(state_name), str(currency), str(pop)]",
"def __init__(__self__, *,\n columns: pulumi.Input[Sequence[pulumi.Input[str]]],\n name: pulumi.Input[str],\n country_code: Optional[pulumi.Input['DataSetGeoSpatialCountryCode']] = None):\n pulumi.set(__self__, \"columns\", columns)\n pulumi.set(__self__, \"name\", name)\n if country_code is not None:\n pulumi.set(__self__, \"country_code\", country_code)",
"def new_contest(self):\n contest_creator = ContestCreator(self.contest_pool.name, self.contest_pool.site_sport,\n self.contest_pool.prize_structure,\n self.contest_pool.start, self.contest_pool.end)\n return contest_creator.create()",
"def testNormalCreate(self):\n\n canada = self.Country(\n {\"name\": \"Canada\", \"abbreviation\": \"CA\", \"languages\": [\"english\", \"french\"]}\n )\n\n canada.save()\n\n self.assertEqual(\"Canada\", canada.name)\n self.assertEqual(\"CA\", canada.abbreviation)\n self.assertEqual(2, len(canada.languages))\n self.assertTrue(\"english\" in canada.languages)\n self.assertTrue(\"french\" in canada.languages)",
"def __init__(self):\n self.connection = DbConnector()\n self.db_connection = self.connection.db_connection\n self.cursor = self.connection.cursor\n\n self.ACTIVITY_ID = 1\n self.TRACKPOINT_ID = 1",
"def __init__(self, year=None, month=None, day=None, map_type='Cases'):\n self.covid_df = pd.DataFrame([])\n self.geo_data = pd.DataFrame([])\n self.name_iso2_mapping = {}\n self.countries_centroids = pd.DataFrame([])\n \n try:\n self.date = datetime(year=year, month=month, day=day)\n except:\n print('Invalid/empty date entry (year, month, day take valid int inputs)! Date defaulted to today.')\n self.date = datetime.today()\n \n if self.date > datetime.today():\n print('Can\\'t input future date! Date defaulted to today.')\n self.date = datetime.today()\n \n if map_type not in ['Cases', 'Deaths']:\n sys.exit('Please specify either \"Cases\" or \"Deaths\" as map type!')\n else:\n self.map_type = map_type",
"def __init__(self, dbfile):\n self.dbfile = dbfile\n self.cxn = sqlite3.connect(dbfile)\n self.cur = self.cxn.cursor()",
"def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry",
"def __init__(self, db_location = ':memory:'):\n self.connection = sqlite3.connect(db_location)\n self.cur = self.connection.cursor()\n self.create_table()"
]
| [
"0.660491",
"0.6150415",
"0.58685863",
"0.5862052",
"0.58298",
"0.5803479",
"0.57945013",
"0.5729912",
"0.5638307",
"0.56267554",
"0.56196684",
"0.5617302",
"0.5576676",
"0.55443263",
"0.5519045",
"0.5514122",
"0.5504914",
"0.5501202",
"0.5499147",
"0.5491143",
"0.54765165",
"0.5462006",
"0.5460873",
"0.5416756",
"0.5414809",
"0.54084957",
"0.5396942",
"0.5388386",
"0.5377123",
"0.53676754"
]
| 0.70173824 | 0 |
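The constructor in the record above is two attribute assignments; the record does not name the enclosing class, so `Contest` below is a hypothetical stand-in, exercised with sqlite3's in-memory database (any DB-API 2.0 cursor would do):

```python
import sqlite3

class Contest:  # class name is not in the record; chosen for illustration
    def __init__(self, country, cursor):
        self.country = country
        self.cursor = cursor

conn = sqlite3.connect(":memory:")
contest = Contest("Kenya", conn.cursor())  # "Kenya" is an arbitrary example value
contest.cursor.execute("SELECT 1")
print(contest.country, contest.cursor.fetchone())  # Kenya (1,)
```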
find a notebook, given its fully qualified name and an optional path This turns "foo.bar" into "foo/bar.ipynb" and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar does not exist. | def find_notebook(fullname, path=None):
name = fullname.rsplit(".", 1)[-1]
if not path:
path = [""]
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
# load .nbpy documents
nbpy_path = os.path.join(d, name + ".nbpy")
if os.path.isfile(nbpy_path):
return nbpy_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _resolve_nb_path(self, nb_path):\n if nb_path is None and self._notebook is not None:\n nb_path = self._notebook\n\n elif nb_path is None and glob(\"*.ipynb\"):\n notebooks = glob(\"*.ipynb\")\n assert len(notebooks) == 1, \"nb_path not specified and > 1 notebook in working directory\"\n nb_path = notebooks[0]\n\n elif nb_path is None:\n raise ValueError(\"Could not resolve notebook path\")\n\n return nb_path",
"def get_notebook_name():\n # taken from https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246\n\n try:\n import requests\n from requests.compat import urljoin\n except ImportError:\n msg = \"This functions depends on the module requests.\"\n # it is not an official dependency because this is not a core functionality\n raise ImportError(msg)\n\n import ipykernel\n import json\n import re\n from notebook.notebookapp import list_running_servers\n\n kernel_id = re.search('kernel-(.*).json',\n ipykernel.connect.get_connection_file()).group(1)\n servers = list_running_servers()\n for ss in servers:\n response = requests.get(urljoin(ss['url'], 'api/sessions'),\n params={'token': ss.get('token', '')})\n for nn in json.loads(response.text):\n if nn['kernel']['id'] == kernel_id:\n relative_path = nn['notebook']['path']\n return os.path.join(ss['notebook_dir'], relative_path)",
"def get_notebook(self, fname):\n matches = [nb for nb in self.notebooks if nb.filename == fname]\n assert len(matches) <= 1, fname\n return matches[0]",
"def _get_nb_path(name: str, suffix: str = \"\", abs: bool = True, ext: str = \".ipynb\") -> str:\n _local_path = os.path.dirname(__file__)\n path = f\"{_local_path}/testdata/{name}{suffix}{ext}\"\n return os.path.abspath(path) if abs else path",
"def get_notebook(self, name, path='', content=True):\n\t\tif not self.notebook_exists(name=name, path=path):\n\t\t\traise web.HTTPError(404, u'Notebook does not exist: %s' % name)\n\t\tos_path = self._get_os_path(name, path)\n\n\t\tkey = self.bucket.get_key(os_path)\n\t\tmodel = {}\n\t\tmodel['name'] = name\n\t\tmodel['path'] = path\n\t\tmodel['last_modified'] = key.last_modified\n\t\tmodel['created'] = key.last_modified\n\t\tmodel['type'] = 'notebook'\n\t\tif content:\n\t\t\tnb = current.reads(key.get_contents_as_string(), u'json')\n\t\t\tself.mark_trusted_cells(nb, name, path)\n\t\t\tmodel['content'] = nb\n\t\treturn model",
"def get_notebook_path():\n kernel_id = re.search('kernel-(.*).json',\n ipykernel.connect.get_connection_file()).group(1)\n servers = list_running_servers()\n for ss in servers:\n response = requests.get(urljoin(ss['url'], 'api/sessions'),\n params={'token': ss.get('token', '')})\n for nn in json.loads(response.text):\n if nn['kernel']['id'] == kernel_id:\n relative_path = nn['notebook']['path']\n return os.path.join(ss['notebook_dir'], relative_path)",
"def notebook_to_string(nb_path: Union[str, nbformat.NotebookNode]) -> str: \n if isinstance(nb_path, str):\n with open(nb_path) as f:\n nb = json.load(f)\n elif isinstance(nb_path, nbformat.NotebookNode):\n nb = nb_path\n else:\n raise TypeError(\"invalid notebook type\")\n \n source = \"\"\n for cell in nb['cells']:\n if cell['cell_type'] == 'code':\n if isinstance(cell['source'], list):\n source += \"\".join(cell['source']) + \"\\n\"\n else:\n assert isinstance(cell['source'], str), f\"could not parse notebook cell: {cell}\"\n source += cell['source'] + \"\\n\"\n\n source = \"\\n\".join(l for l in source.split(\"\\n\") if not l.startswith(\"%\") and not l.startswith(\"!\"))\n return source",
"def update_notebook(self, model, name, path=''):\n\t\tnew_name = model.get('name', name)\n\t\tnew_path = model.get('path', path)\n\t\tif path != new_path or name != new_name:\n\t\t self.rename_notebook(name, path, new_name, new_path)\n\t\tmodel = self.get_notebook(new_name, new_path, content=False)\n\t\treturn model",
"def is_notebook(filename):\n root, ext = os.path.splitext(filename)\n if ext == \".ipynb\":\n return os.path.exists(filename)",
"def _notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n\n # Create a temporary file to write the notebook to.\n # 'with' method is used so the file is closed by tempfile\n # and free to be overwritten.\n # with tempfile.NamedTemporaryFile('w', suffix=\".ipynb\") as fout:\n with tempfile.NamedTemporaryFile(\n \"w\", suffix=\".nbconvert.ipynb\", delete=False\n ) as fout:\n nbpath = fout.name\n\n jupyter_exec = shutil.which(\"jupyter\")\n\n # recent version (~7.3.1) requires output without extension\n out_path = os.path.join(\n os.path.dirname(nbpath), os.path.basename(nbpath).split(\".\", 1)[0]\n )\n args = [\n jupyter_exec,\n \"nbconvert\",\n path,\n \"--output\",\n out_path,\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout=60\",\n ]\n subprocess.check_call(args)\n\n assert os.path.exists(nbpath), \"nbconvert used different output filename\"\n\n nb = nbformat.read(nbpath, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n # Remove the temp file once the test is done\n if os.path.exists(nbpath):\n os.remove(nbpath)\n\n return nb, errors",
"def html2ipynb(path):\n # I don't understand why click isn't handling this?\n path = Path(path)\n if path.is_file() and path.suffix == '.html':\n print(f\"Checking {path}\")\n # Read notebook\n with path.open('r') as f:\n nb = nbformat.v4.new_notebook()\n\n html = f.read()\n soup = BeautifulSoup(html, 'lxml')\n \n for d in soup.findAll(\"div\"):\n if 'class' in d.attrs.keys():\n for clas in d.attrs[\"class\"]:\n if clas in [\"text_cell_render\", \"input_area\"]:\n # code cell\n if clas == \"input_area\":\n cell = nbformat.v4.new_code_cell(d.get_text())\n nb.cells.append(cell)\n\n else:\n cell = nbformat.v4.new_code_cell(d.decode_contents())\n nb.cells.append(cell)\n\n \n outpath = path.with_suffix('.ipynb')\n nbformat.write(nb, outpath.open('w'))",
"def notebook_contains(search_str='',\n on_docker=True,\n git_dir='~/git/experiments/',\n start_date='2015-01-01', end_date='2018-12-31',\n exclude_str='checkpoint',\n include_prefix=False,\n prefix='notebooks/'):\n if on_docker:\n base_dir = \"/home/jovyan/work/\"\n else:\n base_dir = git_dir[:]\n dates = date_range_array(start=start_date, end=end_date)\n rel_files = relevant_files_list(base_dir, dates, exclude_str)\n files = files_containing_str(search_str, rel_files)\n if prefix[-1] == '/':\n prefix = prefix[:-1]\n if include_prefix:\n return [prefix+el.split(basename(prefix))[-1] for el in files]\n else:\n return [el.split(basename(prefix))[-1] for el in files]",
"def replace_ipynb(root):\n for (dirpath, dirname, fnames) in os.walk(root):\n for fname in fnames:\n name, ext = os.path.splitext(fname)\n if ext == \".ipynb\":\n in_fpath = \"{}/{}\".format(dirpath, fname)\n out_fpath = \"{}/{}\".format(dirpath, name + \".py\")\n notebook = load_json(in_fpath)\n code = pull_code(notebook)\n write_code(code, out_fpath)",
"def notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n with tempfile.NamedTemporaryFile(suffix=\".ipynb\") as fout:\n args = [\n \"jupyter\",\n \"nbconvert\",\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout=60\",\n \"--output\",\n fout.name,\n path,\n ]\n subprocess.check_call(args)\n\n fout.seek(0)\n nb = nbformat.read(fout, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n return nb, errors",
"def guess_type(self, path, allow_directory=True):\n if path.endswith(\".ipynb\"):\n return \"notebook\"\n elif allow_directory and self.dir_exists(path):\n return \"directory\"\n else:\n return \"file\"",
"def test_should_handle_notebook_with_invalid_python_name_with_conf(work_dir, mocker):\n mocked_check_output = mocker.patch('subprocess.check_output', return_value=work_dir.encode())\n notebook_path = gen_notebook(cells=[('code', 'pass')], tmp_dir=work_dir, file_name='01_(test) nb.ipynb')\n\n # Create conf in a freshly init git repo\n conf_data = write_conf(work_dir=work_dir, conf_path=join(work_dir, DEFAULT_CONF_FILENAME),\n ignore_keys=['# Ignore', 'remove='])\n\n cmd_arguments = ['-n', notebook_path]\n IPynbToPython().run(*cmd_arguments)\n\n # This path is generated using the conf script_dir and the notebook name\n output_script_path = join(work_dir, conf_data['path']['python_script_root_dir'], 'mlvtools_01__test_nb.py')\n assert exists(output_script_path)\n\n with open(output_script_path, 'r') as fd:\n file_content = fd.read()\n\n # Ensure generated file syntax is right\n compile(file_content, output_script_path, 'exec')\n\n assert mocked_check_output.mock_calls == [mocker.call(\n ['git', 'rev-parse', '--show-toplevel'],\n cwd=work_dir)]",
"def test_notebook():\n jupyter_notebooks = os.getenv('PYNQ_JUPYTER_NOTEBOOKS')\n\n # Try and find the notebook\n if os.path.isdir(f\"{jupyter_notebooks}/pynq-helloworld\"):\n if os.path.isfile(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\"): \n result = run_notebook(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld notebook, expecting it at {jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld directory, expecting it at {jupyter_notebooks}/pynq-helloworld\")",
"def _notebook_run(path):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n with tempfile.NamedTemporaryFile(suffix=\".ipynb\") as fout:\n args = [\"jupyter\", \"nbconvert\", \"--to\", \"notebook\", \"--execute\",\n \"--ExecutePreprocessor.timeout=600\",\n \"--output\", fout.name, path]\n subprocess.check_call(args)\n\n # fout.seek(0)\n nb = nbformat.read(fout.name, nbformat.current_nbformat)\n\n errors = [output\n for cell in nb.cells if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"]\n\n return nb, errors",
"def from_notebook(cls):\n notebook_name = cls.get_notebook_name()\n\n if notebook_name == 'Untitled':\n msg = 'Please set a name for this notebook that is not \"Untitled\".'\n raise ValueError(msg)\n\n return cls(base_dir=notebook_name)",
"def ipynb_path(self):\n return Path(self.dir_path, self.index_file + \".ipynb\").abspath",
"def read_as_notebook(rmd_path):\n with open(rmd_path) as f:\n lines = [l.strip(\"\\n\") for l in f.readlines()]\n\n new_lines = []\n in_comment = False\n in_solution_region, just_closed_solution_region = False, False\n has_prompt = False\n for i, l in enumerate(lines):\n # prevent excess whitespace in the student version of the notebook caused by the removal of\n # the lines containing the solution\n if just_closed_solution_region:\n just_closed_solution_region = False\n if l == \"\":\n continue\n\n if in_comment and l.strip() == HTML_COMMENT_END:\n new_lines.append(\"<!-- #endraw -->\")\n in_comment = False\n\n elif l.startswith(HTML_COMMENT_START):\n if HTML_COMMENT_END in l:\n if CONFIG_START_REGEX.search(l):\n if \"begin\" in l.lower() and \"prompt\" in l.lower():\n has_prompt = True\n if new_lines[len(new_lines) - 1].strip() == \"\":\n new_lines.pop(len(new_lines) - 1)\n\n if has_prompt:\n if \"begin\" in l.lower() and \"solution\" in l.lower():\n has_prompt = False\n if new_lines[len(new_lines) - 1].strip() == \"\":\n new_lines.pop(len(new_lines) - 1)\n\n elif \"end\" in l.lower() and \"prompt\" not in l.lower():\n has_prompt = False\n\n new_lines.append(\"<!-- #raw -->\")\n new_lines.append(EXTRACT_COMMENT_REGEX.match(l).group(1))\n new_lines.append(\"<!-- #endraw -->\")\n\n else:\n if l == \"\"\"<!-- #region tags=[\"otter_assign_solution_cell\"] -->\"\"\":\n in_solution_region = True\n elif in_solution_region and l == \"<!-- #endregion -->\":\n in_solution_region, just_closed_solution_region = False, True\n\n new_lines.append(l)\n\n elif l.strip() == HTML_COMMENT_START:\n if i + 1 < len(lines) and CONFIG_START_REGEX.match(lines[i + 1]):\n new_lines.append(\"<!-- #raw -->\")\n in_comment = True\n\n else:\n new_lines.append(l)\n\n else:\n new_lines.append(l)\n\n if in_comment:\n raise ValueError(\"R Markdown file ends with an unclosed HTML comment\")\n\n nb = jupytext.reads(\"\\n\".join(new_lines), \"Rmd\", as_version=NBFORMAT_VERSION)\n nb[\"metadata\"][\"kernelspec\"] = {\"language\": \"r\"}\n\n return nb",
"def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)",
"def _notebook_run(path, kernel=\"python3\", timeout=300):\n dirname, __ = os.path.split(path)\n os.chdir(dirname)\n with tempfile.NamedTemporaryFile(suffix=\".ipynb\") as fout:\n args = [\n \"jupyter\",\n \"nbconvert\",\n \"--to\",\n \"notebook\",\n \"--execute\",\n \"--ExecutePreprocessor.timeout={}\".format(timeout),\n \"--ExecutePreprocessor.kernel_name={}\".format(kernel),\n \"--output\",\n fout.name,\n path,\n ]\n subprocess.check_call(args)\n\n fout.seek(0)\n nb = nbformat.read(fout, nbformat.current_nbformat)\n\n errors = [\n output\n for cell in nb.cells\n if \"outputs\" in cell\n for output in cell[\"outputs\"]\n if output.output_type == \"error\"\n ]\n\n return nb, errors",
"def newnb(url, path, copy=None):\n # See IPython/html/services/notebooks/handlers.py for API details.\n\n # Compare directory contents before and after new notebook creation.\n names = [nb['name'] for nb in get_nblist(url, path) if nb['type'] == 'notebook']\n\n arg = path\n if isinstance(arg, unicode):\n arg = arg.encode('utf-8')\n\n post_url = urljoin(url, 'api/notebooks', quote(arg)).strip('/')\n if copy is not None:\n data = json.dumps({'copy_from': copy})\n else:\n data = ''\n try:\n resp = web.post(post_url, data=data)\n except URLError:\n raise URLError('Unable to reach %s. Try the \"nbserver\" keyword.' % url)\n resp.raise_for_status()\n\n new_contents = get_nblist(url, path)\n new_names = [nb['name'] for nb in new_contents if nb['type'] == 'notebook']\n try:\n newnbname = list(set(new_names) - set(names))[0]\n except IndexError:\n raise RuntimeError('Notebook creation at %s appears to have failed.' % post_url)\n return newnbname",
"def name_from_path(path):\n return path[0:-3]",
"def _read_rendered_notebook(nb_str):\n # add debug cells\n nb = nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)\n nbformat_v = nbformat.versions[nb.nbformat]\n\n source = \"\"\"\n# Debugging settings (this cell will be removed before saving)\n# change the current working directory to directory of the session that\n# invoked the jupyter app to make relative paths work\nimport os\n{}\n\"\"\".format(chdir_code(Path('.').resolve()))\n\n cell = nbformat_v.new_code_cell(source,\n metadata={'tags': ['debugging-settings']})\n nb.cells.insert(0, cell)\n\n return nb",
"def notebook_cache_director():\n return str(rmfriend_dir() / 'notebooks')",
"def main(args):\n replace_ipynb(args.root)",
"def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None",
"def main(path):\n with open(path, 'r') as f:\n notebook = json.load(f)\n notebook[\"cells\"] = [\n cell for cell in notebook[\"cells\"] if cell[\"cell_type\"] == \"markdown\"\n ]\n with open(path.replace(\".ipynb\", \".tmp.ipynb\"), 'w') as f:\n f.write(json.dumps(notebook))"
]
| [
"0.7191861",
"0.6188864",
"0.6052213",
"0.58972746",
"0.5611016",
"0.55624646",
"0.54965943",
"0.54194707",
"0.54043365",
"0.5306842",
"0.52780384",
"0.5267952",
"0.5224945",
"0.51331246",
"0.51177615",
"0.50909376",
"0.50849307",
"0.50610876",
"0.504978",
"0.50256366",
"0.50050414",
"0.499948",
"0.49872208",
"0.49872202",
"0.49378133",
"0.4918054",
"0.49078602",
"0.48534575",
"0.48247328",
"0.48000535"
]
| 0.815862 | 0 |
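
A minimal usage sketch for the find_notebook document above, kept outside the row itself; the notebooks/ directory and the notebook name are hypothetical, and the import mirrors the os.path calls the snippet relies on.

import os  # the snippet above uses os.path.join and os.path.isfile

# Hypothetical layout: a file "notebooks/My Analysis.ipynb" on disk.
# "pkg.My_Analysis" is reduced to "My_Analysis"; the underscore fallback
# then lets it match "My Analysis.ipynb".
nb_path = find_notebook("pkg.My_Analysis", path=["notebooks"])
if nb_path is not None:
    print("resolved:", nb_path)
else:
    print("no matching .ipynb or .nbpy file")  # the function falls off the end and returns None
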
Function that tells whether the given locale is supported by Anaconda. We consider locales supported by langtable as supported by Anaconda. | def is_supported_locale(locale):
en_name = get_english_name(locale)
return bool(en_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Locale_IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)",
"def IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)",
"def language_supported(self, iso_lang=\"ca-ES\"): # -> bool\n test_lang = \"\"\n if len(iso_lang) == 0:\n return False\n try:\n for sep in [\"-\", \"_\"]:\n if sep in iso_lang:\n test_lang = iso_lang.split(sep)[0]\n break\n except (AttributeError, NameError):\n return False\n try:\n for _test in [iso_lang, test_lang]:\n if _test in gtts.tts.tts_langs():\n return True\n except NameError:\n pass\n return False",
"def locale_supported_in_console(locale):\n\n locale_scripts = get_locale_scripts(locale)\n return set(locale_scripts).issubset(SCRIPTS_SUPPORTED_BY_CONSOLE)",
"def ctx_has_locale() -> bool:\n ctx = _get_current_context()\n if ctx is None:\n return False\n return hasattr(ctx, 'babel_locale')",
"def test_get_supported_locales_for_voice_datasets(self):\n pass",
"def _is_supported_culture(self, culture):\n if not culture:\n return False\n cultures = self.config.cultures or configuration.cultures\n return culture in cultures",
"def language_supported(self,\n _iso_lang=\"en-US\",\n alt_local_url=\"\"): # -> bool\n _found_name = \"\"\n if alt_local_url.startswith(\"http\"):\n self.url = alt_local_url\n if self.ok:\n return self.ok\n if not bool(self.verified_voices):\n self.update_rhvoice_checklist()\n if not bool(self.verified_voices):\n self.ok = False\n return False\n self.ok = False\n for _search in [_iso_lang.lower(), _iso_lang.split(\"-\")[0].lower()]:\n for item in self.checklist:\n if item[0].lower().startswith(_search):\n self.checked_lang = item[0]\n self.ok = True\n break\n if len(self.checked_lang) != 0:\n break\n if len(self.checked_lang) != 0:\n for item in self.checklist:\n if bool(self.common.debug):\n print(item)\n if item[2] == _iso_lang.lower():\n self.checked_lang = item[0]\n self.ok = True\n break\n if self.ok:\n help_heading = self.help_heading\n help_url = self.help_url\n print(f\"\"\"\nChecking {help_heading} voices for `{_iso_lang}`\n========================================\n\n<{help_url}>\n\"\"\")\n return self.ok",
"def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms else False",
"def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False",
"def IsLoaded(*args, **kwargs):\n return _gdi_.Locale_IsLoaded(*args, **kwargs)",
"def validate_locale(self, locale: str) -> bool:\n\n return locale in self.possible_locale_list",
"def supported_languages(self):\n return SUPPORT_LANGUAGES",
"def validate_lang(lang):\n if lang in LANGUAGE_OPTIONS.keys():\n return True",
"def is_conda_available():\n cmd = ['conda', '--version']\n stdout = ''\n stderr = ''\n ERROR = False\n try:\n p = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n stdout, stderr = p.communicate()\n if PY3:\n stdout = stdout.decode()\n stderr = stderr.decode()\n except OSError:\n # conda was not found on path\n ERROR = True\n\n return (\n (stdout.startswith('conda ') or stderr.startswith('conda ')) and\n not ERROR\n )",
"def test_available_locales(translation_folder: Path) -> None:\n # expected grid\n res = [\"es\", \"fr\", \"fr-FR\", \"en\"]\n\n # create the translator\n # -en- to -en-\n translator = Translator(translation_folder)\n\n for locale in res:\n assert locale in translator.available_locales()\n\n # Check no hidden and protected files are in locales\n locales = translator.available_locales()\n assert not all([(loc.startswith(\".\") or loc.startswith(\"_\")) for loc in locales])\n\n return",
"def requires_matching_languages(self):\n return self._requires_matching_languages",
"def is_installed(cls, language=None):\n if language == 'python':\n return True\n return False",
"def cat_l3_supported():\n return common.CAT_L3_CAP in SYSTEM_CAPS",
"def bot_locales(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotLocaleArgs']]]]:\n return pulumi.get(self, \"bot_locales\")",
"def supports_open_gl():\n global SUPPORTS_OPENGL\n if SUPPORTS_OPENGL is None:\n ren_win = _vtk.vtkRenderWindow()\n SUPPORTS_OPENGL = bool(ren_win.SupportsOpenGL())\n return SUPPORTS_OPENGL",
"def cat_l2_supported():\n return common.CAT_L2_CAP in SYSTEM_CAPS",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def available_languages():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getAvailableLanguages()\n return [DEFAULT_LANGUAGE]",
"def check_supported_features(self):",
"def spacy_language_detection(row):\n global non_english_count_global\n\n nlp = spacy.load(\"en\")\n nlp.add_pipe(LanguageDetector(), name=\"language_detector\", last=True)\n document = nlp(row[\"tweet_full_text\"])\n # document level language detection. Think of it like average language of document!\n text_language = document._.language\n row[\"spaCy_language_detect\"] = str(text_language[\"language\"])\n print(\"spaCy language designation:\")\n print(str(text_language[\"language\"]))\n\n if not str(text_language[\"language\"]).startswith('en'):\n non_english_count_global += 1\n log.warning(f\"\\t\\t\\tnon-English tweet (will be dropped): \"\n f\"\\n\\t\\t\\t\\tid: {row['tweet_id']}\"\n f\"\\n\\t\\t\\t\\ttweet: {row['text_derived']}\"\n f\"\\n\\t\\t\\t\\tLanguage tags: {row['spaCy_language_detect']}\"\n )\n return row[\"spaCy_language_detect\"]",
"def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list",
"def _os_supported(self, plugin):\r\n return sys.platform in plugin.plugin_object.get_supported_os()",
"def has_language(lang):\n kn = _get_keyboard_names()\n return kn.has_language(lang)",
"def get_available_locales(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GetAvailableLocales/\"))"
]
| [
"0.71748304",
"0.67560184",
"0.6504597",
"0.6485453",
"0.6169756",
"0.6107829",
"0.60411143",
"0.58866775",
"0.584165",
"0.58095956",
"0.57179374",
"0.56090534",
"0.55979425",
"0.55623186",
"0.5506499",
"0.54679406",
"0.543604",
"0.54240453",
"0.53689474",
"0.53326595",
"0.52523315",
"0.52250516",
"0.52235746",
"0.5206952",
"0.517939",
"0.51507324",
"0.5118077",
"0.51155335",
"0.509889",
"0.50876594"
]
| 0.7342547 | 0 |
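
The get_english_name helper that the is_supported_locale document calls is not part of this row; a minimal stand-in (a hypothetical lookup table, not Anaconda's langtable-backed implementation) is enough to exercise it.

# hypothetical stand-in for the langtable-backed helper:
_ENGLISH_NAMES = {
    "en_US.UTF-8": "English (United States)",
    "cs_CZ.UTF-8": "Czech (Czechia)",
}

def get_english_name(locale):
    # the real helper returns "" for locales it does not know about
    return _ENGLISH_NAMES.get(locale, "")

print(is_supported_locale("cs_CZ.UTF-8"))  # True
print(is_supported_locale("xx_XX"))        # False: empty name, and bool("") is False
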
Function that tells whether the given locale can be displayed by the Linux console. The Linux console can display Latin, Cyrillic and Greek characters reliably, but other scripts, such as Japanese, cannot be displayed correctly. | def locale_supported_in_console(locale):
locale_scripts = get_locale_scripts(locale)
return set(locale_scripts).issubset(SCRIPTS_SUPPORTED_BY_CONSOLE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Locale_IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)",
"def is_supported_locale(locale):\n\n en_name = get_english_name(locale)\n return bool(en_name)",
"def _system_supports_plotting():\n if os.environ.get('ALLOW_PLOTTING', '').lower() == 'true':\n return True\n\n # Windows case\n if os.name == 'nt':\n # actually have to check here. Somewhat expensive.\n return supports_open_gl()\n\n # mac case\n if platform.system() == 'Darwin':\n # check if finder available\n proc = Popen([\"pgrep\", \"-qx\", \"Finder\"], stdout=PIPE, stderr=PIPE)\n proc.communicate()\n if proc.returncode == 0:\n return True\n\n # display variable set, likely available\n return 'DISPLAY' in os.environ\n\n # Linux case\n try:\n proc = Popen([\"xset\", \"-q\"], stdout=PIPE, stderr=PIPE)\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False",
"def IsAvailable(*args, **kwargs):\n return _gdi_.Locale_IsAvailable(*args, **kwargs)",
"def ctx_has_locale() -> bool:\n ctx = _get_current_context()\n if ctx is None:\n return False\n return hasattr(ctx, 'babel_locale')",
"def stdout_supports_color():\r\n plat = sys.platform\r\n supported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n 'ANSICON' in os.environ)\r\n\r\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n if not supported_platform or not is_a_tty:\r\n return False\r\n return True",
"def supports_color(): # pragma: no cover # noqa\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (\n plat != 'win32' or 'ANSICON' in os.environ\n )\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def supports_color():\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and \\\n (plat != 'win32' or 'ANSICON' in os.environ)\n\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def terminal_supports_color():\n plat = sys.platform\n supported_platform = plat != \"Pocket PC\" and (\n plat != \"win32\" or \"ANSICON\" in os.environ\n )\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def term_support_color():\n return OS_VERSION[0] == \"Linux\" or OS_VERSION[0] == \"Darwin\"",
"def win():\n if platform.system() in WINDOWS:\n return True\n return False",
"def terminal_configured():\n return lnp.userconfig.get('terminal_type') is not None",
"def supports_color():\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (plat != 'win32'\n or 'ANSICON' in os.environ)\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n return supported_platform and is_a_tty",
"def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values",
"def supports_color():\n\n sys_platform = sys.platform\n supported = sys_platform != \"Pocket PC\" and (\n sys_platform != \"win32\" or \"ANSICON\" in os.environ\n )\n\n atty_connected = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty()\n return supported and atty_connected",
"def IsUnicodeSupported(self):\n return self._common_type.IsUnicodeSupported()",
"def os_is_windows():\n return platform.system() == \"Windows\"",
"def hasRootAccessToDisplay(display):\n # not necessary on windows\n return True",
"def is_colorterm():\n global _STATIC_VARS\n if 'colorterm' not in _STATIC_VARS:\n terms = ['ansi', 'xterm-color', 'xterm-256color', 'screen']\n _STATIC_VARS.colorterm = _STATIC_VARS.term and \\\n getenv('TERM') in terms\n return _STATIC_VARS.colorterm",
"def supports_color():\n unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if unsupported_platform or not is_a_tty:\n return False\n return True",
"def get_locale_console_fonts(locale):\n\n parts = parse_langcode(locale)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid locale\" % locale)\n\n return langtable.list_consolefonts(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"))",
"def check_display_option(display):\n display_options = get_display_options(verbose=False)\n if display not in display_options:\n err_str = \"The display value (%s) does not correspond to a possible \\\n display value in ENA\" % (display)\n raise ValueError(err_str)",
"def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms else False",
"def isScreen(fmt):\n if fmt == 'CONS' or fmt == 'XWIN' or fmt =='XWLi':\n return 1\n return 0",
"def is_cli_installed(self):\n p = subprocess.Popen([\"which\", \"lpass\"])\n p.communicate()\n\n if p.returncode != 0:\n return False\n\n return True",
"def tty_supports_color():\r\n\t\t\r\n\t\tplat = sys.platform\r\n\r\n\t\tif plat == \"win32\":\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tsupported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'ANSICON' in os.environ)\r\n\t\t# isatty is not always implemented, #6223.\r\n\t\t\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n\t\t\treturn supported_platform and is_a_tty",
"def check_external_programs():\n\n convert = os.system('which convert > /dev/null') == 0\n\n # This structure allows for an easily extensible function;\n # it's here because at first, together with convert I also\n # used axel or something like that.\n error = ''\n if not convert: error += 'Dovresti installare ImageMagik.\\n'\n\n ok = convert\n if not ok:\n m = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,\n buttons=gtk.BUTTONS_CLOSE,\n message_format=error)\n m.set_title('Manca qualcosa...')\n m.run()\n m.destroy()\n\n return ok",
"def is_windows():\n return sys.platform == \"win32\"",
"def is_linux():\r\n return sys.platform.startswith('linux')",
"def is_windows():\n if os.name == \"nt\":\n return True\n return False"
]
| [
"0.6627515",
"0.6607746",
"0.6314651",
"0.61852306",
"0.6138253",
"0.58053625",
"0.57903194",
"0.5744814",
"0.5728713",
"0.5654515",
"0.56541127",
"0.56338316",
"0.5629812",
"0.55803573",
"0.5571935",
"0.5560601",
"0.5543277",
"0.5521936",
"0.5519573",
"0.5507645",
"0.5494354",
"0.5468219",
"0.5466479",
"0.54641414",
"0.5454444",
"0.5431546",
"0.5393026",
"0.53550965",
"0.5336911",
"0.53309834"
]
| 0.8039387 | 0 |
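
The locale_supported_in_console document depends on a module-level SCRIPTS_SUPPORTED_BY_CONSOLE set and a get_locale_scripts helper that this row does not include; both are stubbed below as labeled assumptions so the check can run standalone.

SCRIPTS_SUPPORTED_BY_CONSOLE = {"Latn", "Cyrl", "Grek"}  # assumed constant, per the docstring

_LOCALE_SCRIPTS = {"ru_RU.UTF-8": ["Cyrl"], "ja_JP.UTF-8": ["Jpan"]}  # hypothetical data

def get_locale_scripts(locale):
    return _LOCALE_SCRIPTS.get(locale, [])

print(locale_supported_in_console("ru_RU.UTF-8"))  # True: Cyrillic renders in the console
print(locale_supported_in_console("ja_JP.UTF-8"))  # False: "Jpan" is not in the supported set
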
Function that tells whether the given langcode matches the given locale, i.e. whether all parts appearing in the langcode (language, territory, script and encoding) are the same as the corresponding parts of the locale. | def langcode_matches_locale(langcode, locale):
langcode_parts = parse_langcode(langcode)
locale_parts = parse_langcode(locale)
if not langcode_parts or not locale_parts:
# to match, both need to be valid langcodes (need to have at least
# language specified)
return False
# Check parts one after another. If some part appears in the langcode and
# doesn't match the one from the locale (or is missing in the locale),
# return False, otherwise they match
for part in ("language", "territory", "script", "encoding"):
if langcode_parts[part] and langcode_parts[part] != locale_parts.get(part):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False",
"def find_best_locale_match(locale, langcodes):\n\n score_map = { \"language\" : 1000,\n \"territory\": 100,\n \"script\" : 10,\n \"encoding\" : 1 }\n\n def get_match_score(locale, langcode):\n score = 0\n\n locale_parts = parse_langcode(locale)\n langcode_parts = parse_langcode(langcode)\n if not locale_parts or not langcode_parts:\n return score\n\n for part, part_score in score_map.iteritems():\n if locale_parts[part] and langcode_parts[part]:\n if locale_parts[part] == langcode_parts[part]:\n # match\n score += part_score\n else:\n # not match\n score -= part_score\n elif langcode_parts[part] and not locale_parts[part]:\n # langcode has something the locale doesn't have\n score -= part_score\n\n return score\n\n scores = []\n\n # get score for each langcode\n for langcode in langcodes:\n scores.append((langcode, get_match_score(locale, langcode)))\n\n # find the best one\n sorted_langcodes = sorted(scores, key=lambda item_score: item_score[1], reverse=True)\n\n # matches matching only script or encoding or both are not useful\n if sorted_langcodes and sorted_langcodes[0][1] > score_map[\"territory\"]:\n return sorted_langcodes[0][0]\n else:\n return None",
"def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False",
"def validate_locale(self, locale: str) -> bool:\n\n return locale in self.possible_locale_list",
"def is_supported_locale(locale):\n\n en_name = get_english_name(locale)\n return bool(en_name)",
"def validate_lang(lang):\n if lang in LANGUAGE_OPTIONS.keys():\n return True",
"def has_language(lang):\n kn = _get_keyboard_names()\n return kn.has_language(lang)",
"def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES",
"def verificar_pangrama(cadena):\n for i in range(len(ascii_lowercase)):\n if ascii_lowercase[i] in cadena.lower():\n continue\n else:\n return False\n return True",
"def IsLanguageTag(cls, string):\n return string.lower() in cls._LANGUAGE_PER_TAG_LOWER_CASE",
"def validate_language(language):\n\n try:\n lang_code = language_dict[language]\n except KeyError:\n lang_code = None\n return lang_code",
"def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False",
"def test_languages(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.language:\n # The language code should be in ISO 639 format and consists of\n # two letters for ISO 639-1 languages and three letters otherwise.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.islower(), f'Line {i}: Language code should be '\n 'lower-case')\n if len(code) == 3:\n lang = pycountry.languages.get(alpha_3=code)\n self.assertTrue(lang, f'Failed to find language for code {code}')\n if hasattr(lang, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code `{lang.alpha_2}`'\n f' instead of `{lang.alpha_3}` for {lang.name}')\n else:\n lang = pycountry.languages.get(alpha_2=code)\n self.assertTrue(lang, f'Failed to find language for code {code}')",
"def __contains__(self, query): # a contains method\r\n \r\n if query in self._languageSet or query[0].lower( ) +query[1:] in self._languageSet: # check if the given string is in language set or not\r\n return True # return True if present else False\r\n else:\r\n return False",
"def language_supported(self, iso_lang=\"ca-ES\"): # -> bool\n test_lang = \"\"\n if len(iso_lang) == 0:\n return False\n try:\n for sep in [\"-\", \"_\"]:\n if sep in iso_lang:\n test_lang = iso_lang.split(sep)[0]\n break\n except (AttributeError, NameError):\n return False\n try:\n for _test in [iso_lang, test_lang]:\n if _test in gtts.tts.tts_langs():\n return True\n except NameError:\n pass\n return False",
"def is_english(message, word_percentage=20, letter_percentage=85):\n words_match = get_english_count(message) * 100 >= word_percentage\n num_letters = len(remove_non_letters(message))\n message_letters_percentage = float(num_letters) / len(message) * 100\n letters_match = message_letters_percentage >= letter_percentage\n return words_match and letters_match",
"def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list",
"def has_different_coding_regions(transcript_list):\n previous_cds = transcript_list[0].get_cds()\n for tx in transcript_list[1:]:\n if not identical_cds(previous_cds,tx.get_cds()):\n return True\n return False",
"def is_english(text):\n\n lang = langid.classify(text)\n if lang and 'en' in lang[0]:\n return True\n return False",
"def requires_matching_languages(self):\n return self._requires_matching_languages",
"def _has_language_changed(self, pid, lid, modified_at):\n if not self._has_project_changed(pid, modified_at):\n return False\n\n # look up the existing language entry\n cache_key = pid\n if cache_key in self.ts_languages_cache:\n ts_languages = self.ts_languages_cache[cache_key]\n else:\n ts_languages = self.get_url('https://cdn.door43.org/v2/ts/{0}/languages.json'.format(pid), True)\n if ts_languages:\n self.ts_languages_cache[cache_key] = ts_languages\n\n if not ts_languages:\n return False\n try:\n languages = json.loads(ts_languages)\n except:\n return False\n\n # check if the resource has been modified\n for lang in languages:\n if lang['language']['slug'] != lid:\n continue\n if 'long_date_modified' in lang['language']:\n return date_is_older(lang['language']['long_date_modified'], modified_at)\n else:\n # backwards compatibility\n return date_is_older(lang['language']['date_modified'], make_legacy_date(modified_at))",
"def _does_words_matches(original_word: str, encoded_word: str) -> bool:\n return(\n len(original_word) == len(encoded_word) and\n original_word[0] == encoded_word[0] and\n original_word[-1] == encoded_word[-1] and\n sorted(original_word[1:-1]) == sorted(encoded_word[1:-1])\n )",
"def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False",
"def is_valid_language(self, file):\n if not self.languages or get_file_type(file[\"path\"]) in self.languages:\n return True\n return False",
"def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)",
"def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False",
"def _is_released(self, lang_code):\r\n return any(lang_code.lower().startswith(released_lang.lower()) for released_lang in self.released_langs)",
"def is_script_lang(elem_1):\n script_langs = [ 'sh', 'bash', 'csh', 'tcsh', 'zsh', 'perl', 'python', 'php', 'ruby']\n basename = os.path.basename(elem_1)\n if basename in script_langs:\n return True\n else:\n return False",
"def test_good_values_for_validate_locale_code(good_value):\n bcvalidators.validate_locale_code(good_value)",
"def language_supported(self,\n _iso_lang=\"en-US\",\n alt_local_url=\"\"): # -> bool\n _found_name = \"\"\n if alt_local_url.startswith(\"http\"):\n self.url = alt_local_url\n if self.ok:\n return self.ok\n if not bool(self.verified_voices):\n self.update_rhvoice_checklist()\n if not bool(self.verified_voices):\n self.ok = False\n return False\n self.ok = False\n for _search in [_iso_lang.lower(), _iso_lang.split(\"-\")[0].lower()]:\n for item in self.checklist:\n if item[0].lower().startswith(_search):\n self.checked_lang = item[0]\n self.ok = True\n break\n if len(self.checked_lang) != 0:\n break\n if len(self.checked_lang) != 0:\n for item in self.checklist:\n if bool(self.common.debug):\n print(item)\n if item[2] == _iso_lang.lower():\n self.checked_lang = item[0]\n self.ok = True\n break\n if self.ok:\n help_heading = self.help_heading\n help_url = self.help_url\n print(f\"\"\"\nChecking {help_heading} voices for `{_iso_lang}`\n========================================\n\n<{help_url}>\n\"\"\")\n return self.ok"
]
| [
"0.67303485",
"0.6682557",
"0.64869493",
"0.6195389",
"0.57334167",
"0.56621647",
"0.5462071",
"0.5413448",
"0.53151387",
"0.5269527",
"0.5230982",
"0.52007663",
"0.5193894",
"0.51931304",
"0.51603484",
"0.50795096",
"0.5066487",
"0.5033391",
"0.50066024",
"0.5000865",
"0.49936724",
"0.49285176",
"0.49182612",
"0.49182612",
"0.49097076",
"0.48952848",
"0.48945042",
"0.48927945",
"0.4877263",
"0.485346"
]
| 0.8122806 | 0 |
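
parse_langcode comes from the same Anaconda module but is not part of this row; the simplified stand-in below (assuming a language[_territory][.encoding][@script] shape) is enough to demonstrate the matching rules of langcode_matches_locale.

import re

_LANGCODE_RE = re.compile(
    r"(?P<language>[A-Za-z]+)"
    r"(_(?P<territory>[A-Za-z]+))?"
    r"(\.(?P<encoding>[-A-Za-z0-9]+))?"
    r"(@(?P<script>[-A-Za-z0-9]+))?$"
)

def parse_langcode(langcode):
    # simplified stand-in for the real parser (an assumption, not Anaconda's code)
    match = _LANGCODE_RE.match(langcode)
    if not match:
        return None
    return {key: match.group(key) or "" for key in ("language", "territory", "script", "encoding")}

print(langcode_matches_locale("cs", "cs_CZ.UTF-8"))     # True: only the language part is compared
print(langcode_matches_locale("cs_CZ", "cs_CZ.UTF-8"))  # True: language and territory both match
print(langcode_matches_locale("de_CH", "cs_CZ.UTF-8"))  # False: the language part differs
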
Find the best match for the locale in a list of langcodes. This is useful when e.g. pt_BR is the locale and an item (e.g. rnote) has to be chosen from a list containing pt and pt_BR, or even pt_PT as well. | def find_best_locale_match(locale, langcodes):
score_map = { "language" : 1000,
"territory": 100,
"script" : 10,
"encoding" : 1 }
def get_match_score(locale, langcode):
score = 0
locale_parts = parse_langcode(locale)
langcode_parts = parse_langcode(langcode)
if not locale_parts or not langcode_parts:
return score
for part, part_score in score_map.iteritems():
if locale_parts[part] and langcode_parts[part]:
if locale_parts[part] == langcode_parts[part]:
# match
score += part_score
else:
# not match
score -= part_score
elif langcode_parts[part] and not locale_parts[part]:
# langcode has something the locale doesn't have
score -= part_score
return score
scores = []
# get score for each langcode
for langcode in langcodes:
scores.append((langcode, get_match_score(locale, langcode)))
# find the best one
sorted_langcodes = sorted(scores, key=lambda item_score: item_score[1], reverse=True)
# matches matching only script or encoding or both are not useful
if sorted_langcodes and sorted_langcodes[0][1] > score_map["territory"]:
return sorted_langcodes[0][0]
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang",
"def best_match_language(self):\n if not self.accept_language:\n return None\n return self.accept_language.best_match(\n i18n.get_available_languages())",
"def search_language(string, allowed_languages=None):\n\n if allowed_languages:\n allowed_languages = set(Language.fromguessit(lang) for lang in allowed_languages)\n\n confidence = 1.0 # for all of them\n\n for prop, language, lang, word in find_possible_languages(string, allowed_languages):\n pos = string.find(word)\n end = pos + len(word)\n\n # only allow those languages that have a 2-letter code, those that\n # don't are too esoteric and probably false matches\n # if language.lang not in lng3_to_lng2:\n # continue\n\n # confidence depends on alpha2, alpha3, english name, ...\n if len(lang) == 2:\n confidence = 0.8\n elif len(lang) == 3:\n confidence = 0.9\n elif prop == 'subtitleLanguage':\n confidence = 0.6 # Subtitle prefix found with language\n else:\n # Note: we could either be really confident that we found a\n # language or assume that full language names are too\n # common words and lower their confidence accordingly\n confidence = 0.3 # going with the low-confidence route here\n\n return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end))\n\n return None",
"def closest_match(desired_language: {str, Language}, supported_languages: list,\n max_distance: int=25) -> (str, int):\n # Quickly return if the desired language is directly supported\n if desired_language in supported_languages:\n return desired_language, 0\n\n # Reduce the desired language to a standard form that could also match\n desired_language = standardize_tag(desired_language)\n if desired_language in supported_languages:\n return desired_language, 0\n\n match_distances = [\n (supported, tag_distance(desired_language, supported))\n for supported in supported_languages\n ]\n match_distances = [\n (supported, distance) for (supported, distance) in match_distances\n if distance <= max_distance\n ] + [('und', 1000)]\n\n match_distances.sort(key=itemgetter(1))\n return match_distances[0]",
"def get_language(tokens, stopwords):\n\n languages_ratios = dict()\n\n words = [str(word).lower() for word in tokens]\n words_set = set(words)\n\n for language in stopwords.keys():\n common_elements = words_set.intersection(stopwords[language])\n\n languages_ratios[language] = len(common_elements) # language score\n\n return max(languages_ratios, key=languages_ratios.get)",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def extract_lang(lang, lang_available):\n reg = r\"\"\"[a-z]{2}[_][A-Z]{2}\"\"\" # xx_XX\n lang_user = re.findall(reg, lang.replace(\"-\", \"_\"))\n # code below will list matching languages to\n # first two letters (\"en\") instead of whole\n # language code (\"en_GB\", \"en_US\", etc.).\n # Not really a problem when we've only 2 languages...\n lang_available_user = list()\n for l in lang_user:\n for lat in lang_available:\n if l[:2] in lat:\n lang_available_user.append(lat)\n return lang_available_user[0]",
"def _try_to_get_an_english_value(self, localized_values):\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value",
"def find_possible_languages(string, allowed_languages=None):\n\n common_words = None\n if allowed_languages:\n common_words = LNG_COMMON_WORDS_STRICT\n else:\n common_words = LNG_COMMON_WORDS\n\n words = find_words(string)\n\n valid_words = []\n for word in words:\n lang_word = word.lower()\n key = 'language'\n for prefix in subtitle_prefixes:\n if lang_word.startswith(prefix):\n lang_word = lang_word[len(prefix):]\n key = 'subtitleLanguage'\n for suffix in subtitle_suffixes:\n if lang_word.endswith(suffix):\n lang_word = lang_word[:len(suffix)]\n key = 'subtitleLanguage'\n for prefix in lang_prefixes:\n if lang_word.startswith(prefix):\n lang_word = lang_word[len(prefix):]\n if lang_word not in common_words:\n try:\n lang = Language.fromguessit(lang_word)\n if allowed_languages:\n if lang.name.lower() in allowed_languages or lang.alpha2.lower() in allowed_languages or lang.alpha3.lower() in allowed_languages:\n valid_words.append((key, lang, lang_word, word))\n # Keep language with alpha2 equivalent. Others are probably\n # uncommon languages.\n elif lang == 'mul' or hasattr(lang, 'alpha2'):\n valid_words.append((key, lang, lang_word, word))\n except babelfish.Error:\n pass\n return valid_words",
"def get_language_code_coding_and_locale():\n # type: () -> Tuple[str, str, str]\n try:\n language_code, encoding = locale.getdefaultlocale()\n if language_code and encoding:\n used_locale = \".\".join([language_code, encoding])\n else:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale\"\n except Exception as e:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale: %s\" % (str(e))\n\n return language_code, encoding, used_locale",
"def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"",
"def negotiate(\n cls,\n preferred: Iterable[str],\n available: Iterable[str],\n sep: str = '_',\n aliases: Mapping[str, str] = LOCALE_ALIASES,\n ) -> Locale | None:\n identifier = negotiate_locale(preferred, available, sep=sep,\n aliases=aliases)\n if identifier:\n return Locale.parse(identifier, sep=sep)\n return None",
"def get_language_script(script):\n languages_scripts = {\n 'arab': ('ara', 'per'),\n 'cyrl': ('bel', 'chu', 'mac', 'rus', 'srp', 'ukr'),\n 'grek': ('grc', 'gre'),\n 'hani': ('chi', 'jpn'),\n 'hebr': ('heb', 'lad', 'yid'),\n 'jpan': ('jpn', ),\n 'kore': ('kor', ),\n 'zyyy': ('chi', )\n }\n if script in languages_scripts:\n languages = ([marc21.lang_from_008] +\n marc21.langs_from_041_a +\n marc21.langs_from_041_h)\n for lang in languages:\n if lang in languages_scripts[script]:\n return '-'.join([lang, script])\n error_print('WARNING LANGUAGE SCRIPTS:', marc21.bib_id,\n script, '008:', marc21.lang_from_008,\n '041$a:', marc21.langs_from_041_a,\n '041$h:', marc21.langs_from_041_h)\n return '-'.join(['und', script])",
"def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)",
"def to_language(arg: str) -> Tuple[Union[str, None], str]: \n if (low:= arg.lower()) in LANGUAGES:\n return arg\n else:\n return LANGCODES.get(low, None)",
"def existing_paradigm(aff_accepted, morphemes):\n for t in morphemes:\n if set(morphemes[t][1]) == set(aff_accepted):\n return t\n return None",
"def get_best_match(self, list):\n raise NotImplementedError",
"def _detect_language(self, text):\n\n ratios = self._calculate_languages_ratios(text)\n\n most_rated_language = max(ratios, key=ratios.get)\n\n return most_rated_language",
"def get_lang_code(lang_code):\r\n if lang_code not in constants.SUPPORTED_LANG_CODES_ANALYZERS:\r\n return constants.FALLBACK_LANG_CODE\r\n return lang_code",
"def langcode_matches_locale(langcode, locale):\n\n langcode_parts = parse_langcode(langcode)\n locale_parts = parse_langcode(locale)\n\n if not langcode_parts or not locale_parts:\n # to match, both need to be valid langcodes (need to have at least\n # language specified)\n return False\n\n # Check parts one after another. If some part appears in the langcode and\n # doesn't match the one from the locale (or is missing in the locale),\n # return False, otherwise they match\n for part in (\"language\", \"territory\", \"script\", \"encoding\"):\n if langcode_parts[part] and langcode_parts[part] != locale_parts.get(part):\n return False\n\n return True",
"def Locale_FindLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_FindLanguageInfo(*args, **kwargs)",
"def FindLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_FindLanguageInfo(*args, **kwargs)",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"",
"def resolveCountryCode(country_code):\n country_name = None\n if len(country_code) > 2:\n country_name = country_code\n country_code = next((cc for cc, country in countries.items() if country == country_code), None)\n if country_code not in countries:\n logger.error(\"Country code %s unknown. For a list of know codes execute:\")\n logger.error(sys.argv[0] + ' --list-countries \\tList all available countries that can be blocked.')\n sys.exit(255)\n if not country_name:\n country_name = countries[country_code]\n return [country_code, country_name]",
"def best_ans(core_nlp, question, answer_list):\n scores = {}\n start_time = time()\n q_dependencies = list(generate(core_nlp.parse(question)))\n elapsed_time = time() - start_time\n print 'Time taken to create q dependencies :', elapsed_time\n for index, value in enumerate(answer_list):\n start_time = time()\n a_dependencies = list(generate(core_nlp.parse(unidecode(value))))\n elapsed_time = time() - start_time\n print 'Time taken to create a dependencies :', elapsed_time\n scores[index + 1] = find_score(q_dependencies, a_dependencies)\n\n print 'Scores :', str(scores)\n\n min_scores = min(scores.values())\n return [k for k, v in scores.iteritems() if v == min_scores]",
"def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language",
"def get_locale_from_accept_header(request):\n header = request.headers.get(\"Accept-Language\", '')\n parsed = parse_accept_language_header(header)\n if parsed is None:\n return None\n locale_list_sorted_by_q = sorted(parsed.iterkeys(), reverse=True)\n locale = Locale.negotiate(locale_list_sorted_by_q, config.locales, sep='_')\n return str(locale)",
"def select_best_alphabetical_word(ref_word, word_list):\n case_mode = -1 if ref_word[0].isupper() else 0\n term_ascii_code = {term: [ord(ch) for ch in term] for term in word_list}\n\n for ascii_code in term_ascii_code.values():\n for i in xrange(len(ascii_code)):\n code = ascii_code[i]\n\n # Non a-zA-Z chars will have a 0 value\n if code < 65 or 90 < code < 97 or code > 122:\n ascii_code[i] = 0\n\n if case_mode >= 0:\n ascii_val = min(term_ascii_code.values())\n\n tkn_list = [t for t, v in term_ascii_code.items() if v == ascii_val]\n\n if len(tkn_list) > 1:\n return select_by_hash(tkn_list)\n\n return tkn_list[0]\n else:\n ascii_val = max(term_ascii_code.values())\n\n tkn_list = [t for t, v in term_ascii_code.items() if v == ascii_val]\n\n if len(tkn_list) > 1:\n return select_by_hash(tkn_list)\n\n return tkn_list[0]",
"def search_lang(self,strz):\n\t\tfor lang in languages: #languages = list of allow lang words\n\t\t\tif lang in strz:\n\t\t\t\tif len(self.language)>0:\n\t\t\t\t\tself.language+='.'+lang.replace(\".\",\"\")\n\t\t\t\telse:\n\t\t\t\t\tself.language+=lang.replace(\".\",\"\")\n\t\t\t\tstrz =strz.replace(lang,\"\")\n\t\treturn strz"
]
| [
"0.6443872",
"0.61079204",
"0.61042756",
"0.607895",
"0.5849928",
"0.5608303",
"0.55125475",
"0.5488135",
"0.5405592",
"0.5380822",
"0.5371423",
"0.5334464",
"0.53292495",
"0.5322359",
"0.53013057",
"0.52993584",
"0.52987677",
"0.52869475",
"0.526974",
"0.5255281",
"0.5253893",
"0.52530396",
"0.5226489",
"0.52107304",
"0.5128195",
"0.51174456",
"0.5115044",
"0.51042324",
"0.51031375",
"0.50888246"
]
| 0.80921227 | 0 |
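The record above (like every row in this dump) pairs one query with a positive document, a list of negatives, and per-negative scores. A minimal hard-negative-mining sketch, assuming the rows are serialized as JSON lines with exactly these field names — the file name is hypothetical:

```python
import json

# Hypothetical file layout: one JSON object per line with the fields visible
# in the records of this dump (query, document, negatives, negative_scores).
with open("triplets.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        scores = [float(s) for s in row["negative_scores"]]  # scores are stored as strings
        hardest = row["negatives"][scores.index(max(scores))]
        print(f"{row['query'][:50]!r} -> hardest negative score {max(scores):.4f}")
```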
Procedure that sets the system to use the given locale and stores it in the ksdata.lang object (if given). DOES NOT PERFORM ANY CHECKS OF THE GIVEN LOCALE. $LANG must be set by the caller in order to set the language used by gettext; doing this in a thread-safe way is up to the caller. We also try to set a proper console font for the locale in text mode. If the font for the locale can't be displayed in the Linux console, we fall back to the English locale. | def setup_locale(locale, lang=None, text_mode=False):
if lang:
lang.lang = locale
# not all locales might be displayable in text mode
if text_mode:
# check if the script corresponding to the locale/language
# can be displayed by the Linux console
# * all scripts for the given locale/language need to be
# supported by the linux console
# * otherwise users might get a screen full of white rectangles
# (also known as "tofu") in text mode
# then we also need to check if we have information about what
# font to use for correctly displaying the given language/locale
script_supported = locale_supported_in_console(locale)
log.debug("scripts found for locale %s: %s", locale, get_locale_scripts(locale))
console_fonts = get_locale_console_fonts(locale)
log.debug("console fonts found for locale %s: %s", locale, console_fonts)
font_set = False
if script_supported and console_fonts:
# try to set console font
for font in console_fonts:
if set_console_font(font):
# console font set successfully, skip the rest
font_set = True
break
if not font_set:
log.warning("can't set console font for locale %s", locale)
# report what exactly went wrong
            if not script_supported:
                log.warning("script not supported by console for locale %s", locale)
            if not console_fonts:  # no fonts known for locale
log.warning("no console font found for locale %s", locale)
if script_supported and console_fonts:
log.warning("none of the suggested fonts can be set for locale %s", locale)
log.warning("falling back to the English locale")
locale = constants.DEFAULT_LANG
os.environ["LANG"] = locale # pylint: disable=environment-modify
# set the locale to the value we have selected
log.debug("setting locale to: %s", locale)
setenv("LANG", locale)
locale_mod.setlocale(locale_mod.LC_ALL, locale) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_i18n(lang, language=None):\n import gettext\n import locale\n import warnings\n import os\n\n try:\n locale.setlocale(locale.LC_ALL, lang)\n locale.setlocale(locale.LC_MESSAGES, language or lang)\n os.environ[\"LANG\"] = lang\n os.environ[\"LANGUAGE\"] = language or lang.split(\".\")[0]\n except locale.Error:\n warnings.warn(f\"locale is not supported: {lang}\")\n gettext.bindtextdomain(\"messages\", localedir=LOCALEDIR)",
"def _initializeLocale():\n \n if sys.platform == constants.WIN32:\n locale.setlocale(locale.LC_ALL, \"\")\n else:\n if constants.LC_ALL in os.environ:\n try:\n locale.setlocale(locale.LC_ALL, os.environ[constants.LC_ALL])\n return\n except locale.Error:\n # First try did not work, encoding must be set first then set locale.\n pass\n languageCode, encoding = locale.getdefaultlocale()\n if languageCode is None:\n languageCode = \"en_US\"\n # Set the encoding of the Python environment if no encoding is set.\n if encoding is None:\n encoding = constants.UTF8\n if encoding.lower() == \"utf\":\n encoding = constants.UTF8\n try:\n locale.setlocale(locale.LC_ALL, \"%s.%s\" % (languageCode, encoding))\n except locale.Error:\n try:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n except locale.Error:\n locale.setlocale(locale.LC_ALL, \"C\")",
"def setup_locale_environment(locale=None, text_mode=False, prefer_environment=False):\n\n # pylint: disable=environment-modify\n\n # Look for a locale in the environment. If the variable is setup but\n # empty it doesn't count, and some programs (KDE) actually do this.\n # If prefer_environment is set, the environment locale can override\n # the parameter passed in. This can be used, for example, by initial-setup,\n # to prefer the possibly-more-recent environment settings before falling back\n # to a locale set at install time and saved in the kickstart.\n if not locale or prefer_environment:\n for varname in (\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\", \"LANG\"):\n if varname in os.environ and os.environ[varname]:\n locale = os.environ[varname]\n break\n\n # Look for a locale in the firmware if there was nothing in the environment\n if not locale:\n locale = get_firmware_language(text_mode)\n\n # parse the locale using langtable\n if locale:\n env_langs = get_language_locales(locale)\n if env_langs:\n # the first langauge is the best match\n locale = env_langs[0]\n else:\n log.error(\"Invalid locale '%s' given on command line, kickstart or environment\", locale)\n locale = None\n\n # If langtable returned no locales, or if nothing was configured, fall back to the default\n if not locale:\n locale = constants.DEFAULT_LANG\n\n # Save the locale in the environment\n os.environ[\"LANG\"] = locale\n\n # Cleanup the rest of the environment variables\n for varname in (\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"):\n if varname in os.environ:\n del os.environ[varname]",
"def InitLocale(self):\n self.ResetLocale()\n if 'wxMSW' in wx.PlatformInfo:\n import locale\n try:\n lang, enc = locale.getdefaultlocale()\n self._initial_locale = wx.Locale(lang, lang[:2], lang)\n # locale.setlocale(locale.LC_ALL, lang)\n # locale.setlocale(locale.LC_ALL, 'C')\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale: lang = {lang}\\n')\n print(lang)\n except (ValueError, locale.Error) as ex:\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-0 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)\n try:\n locale.setlocale(locale.LC_ALL, lang.replace('_', '-'))\n except (ValueError, locale.Error) as ex:\n locale.setlocale(locale.LC_ALL, lang.replace('-', '_'))\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-1 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)",
"def set_lang(lang, graceful_fail = False, **kwargs):\r\n registry = pylons.request.environ['paste.registry']\r\n if not lang:\r\n registry.replace(pylons.translator, NullTranslations())\r\n else:\r\n translator = _get_translator(lang, graceful_fail = graceful_fail, **kwargs)\r\n registry.replace(pylons.translator, translator)",
"def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass",
"def do_lang(self, lang):\n\n self.lang = lang\n print(\"Set language to %s\" % lang)",
"def set_default_language(language_code):\n thread_locals.DEFAULT_LANGUAGE = language_code",
"def set_locale_de():\n try:\n if platform.system() == \"Windows\":\n locale.setlocale(locale.LC_ALL, \"German\")\n else:\n locale.setlocale(locale.LC_ALL, \"de_DE.utf8\")\n except locale.Error:\n pass",
"def setLanguage(self, translator: ghidra.program.util.LanguageTranslator, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...",
"def __initializeLocale(self):\n langdomain = 'tortugaStrings'\n\n # Locate the Internationalization stuff\n localedir = '../share/locale' \\\n if os.path.exists('../share/locale') else \\\n os.path.join(self._cm.getRoot(), 'share/locale')\n\n gettext.install(langdomain, localedir)",
"def set_locale(cls, force=None):\n # disable i18n if config.locales array is empty or None\n if not config.locales:\n return None\n # 1. force locale if provided\n locale = force\n if locale not in config.locales:\n # 2. retrieve locale from url query string\n locale = cls.request.get(\"hl\", None)\n if locale not in config.locales:\n # 3. retrieve locale from cookie\n locale = cls.request.cookies.get('hl', None)\n if locale not in config.locales:\n # 4. retrieve locale from accept language header\n locale = get_locale_from_accept_header(cls.request)\n if locale not in config.locales:\n # 5. detect locale from IP address location\n territory = get_territory_from_ip(cls) or 'ZZ'\n locale = str(Locale.negotiate(territory, config.locales))\n if locale not in config.locales:\n # 6. use default locale\n locale = i18n.get_store().default_locale\n i18n.get_i18n().set_locale(locale)\n # save locale in cookie with 26 weeks expiration (in seconds)\n cls.response.set_cookie('hl', locale, max_age = 15724800)\n return locale",
"def set_lang(self, lang: LangEnum) -> None:\n self._logger.debug(\"running\")\n self._base_strings = strings[lang]\n self._logger.debug(\"done\")",
"def activate(locale, path=None):\r\n if path is None:\r\n path = _DEFAULT_LOCALE_PATH\r\n if locale not in _TRANSLATIONS:\r\n translation = gettext_module.translation('humanize', path, [locale])\r\n _TRANSLATIONS[locale] = translation\r\n _CURRENT.locale = locale\r\n return _TRANSLATIONS[locale]",
"def set_language(self, lang):\n self.lang = lang",
"def setLocale(self, value):\n return self._set(locale=value)",
"def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext",
"def set_utf8_locale():\n lang, encoding = locale.getlocale()\n if encoding != 'UTF-8':\n locale.setlocale(locale.LC_CTYPE, (lang, 'UTF-8'))",
"def with_locale(self, locale):\n self.__locale = locale\n return self",
"def set_default_language(lang):\n\tif frappe.db.get_default(\"lang\") != lang:\n\t\tfrappe.db.set_default(\"lang\", lang)\n\tfrappe.local.lang = lang",
"def set_default_language(lang):\n\tif frappe.db.get_default(\"lang\") != lang:\n\t\tfrappe.db.set_default(\"lang\", lang)\n\tfrappe.local.lang = lang",
"def do_locale(args):\r\n # Global can't be defined at module level. Processes are wierd. pylint: disable=W0601\r\n global ARGS\r\n signal.signal(signal.SIGINT, signal.SIG_IGN) # Set the workers to ignore KeyboardInterrupts.\r\n # Unpack arguments\r\n lang, langs, stem, cstem, modfilter, brname, browser, ARGS = args\r\n parseargs()\r\n # A Hack. CN has a different structure, so use a different url form.\r\n if lang == 'cn':\r\n stem = cstem\r\n # Reset the driver between rounds\r\n restart_driver(browser)\r\n # Log into the site, so you can access the modules.\r\n try:\r\n log_in(lang)\r\n except Exception:\r\n DRIVER.quit()\r\n return '\"Login to {0} failed. That breaks the whole locale, look into it:\\n{1}\"'.format(\r\n lang, tidy_error().replace('\"', '\"\"'))\r\n\r\n # Start recording results.\r\n result = '_'.join([lang.upper(), brname.upper()])\r\n for mod in modfilter:\r\n try:\r\n # Figure out the locale coding.\r\n url = stem.format(langs[lang][0].replace('-', '_'), MODULES[mod][lang])\r\n DRIVER.get(url)\r\n begin_module()\r\n # Try to do the module\r\n for elem in SCRIPTS[mod]:\r\n domo(elem)\r\n result += ',\"{0}: PASS\"'.format(get_time())\r\n # Something goes wrong, document it and go to the next module.\r\n except Exception:\r\n result += ',\"{0}: FAIL: {1}\"'.format(get_time(), tidy_error().replace('\"', '\"\"'))\r\n draw_failure(lang, mod)\r\n DRIVER.quit()\r\n return result",
"def set_language(request):\r\n user = request.user\r\n lang_pref = request.POST.get('language', None)\r\n\r\n if lang_pref:\r\n UserPreference.set_preference(user, LANGUAGE_KEY, lang_pref)\r\n return HttpResponse('{\"success\": true}')\r\n\r\n return HttpResponseBadRequest('no language provided')",
"def change_language(self, language=None, from_error=False):\n supported_langs = self.languages_and_comments.keys()\n if from_error:\n print _(\"Unsupported language. Available languages are:\")\n for lang in supported_langs:\n print \" - \", lang\n if not language:\n language = raw_input(_(\"Language: \"))\n if not language in supported_langs:\n self.change_language(None, True)\n else:\n self.default_lang = language",
"def on_language_changed(self, locale_code: str):\n self.localisationsettings.formats.on_language_changed(locale_code) # XXX: notify\n self.localisationsettings.keyboard.on_language_changed(locale_code) # XXX: notify\n self.translate_to(locale_code)\n self.mainwindow.current_language = localization.language_from_locale(locale_code)",
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def set_faker_locale(self, locale):\n try:\n self._faker = faker.Faker(locale)\n except AttributeError:\n raise Exception(f\"Unknown locale for fake data: '{locale}'\")",
"def set_language(req):\n try:\n user_id = req.user\n lang = req.json_body['data']['lang']\n except KeyError as e:\n msg = req.get_error_msg(e)\n return send_error_response(msg)\n try:\n rt.update_state(user_id, language=lang)\n return send_success_response()\n except Exception as e:\n msg = req.get_error_msg(e, lang)\n return send_error_response(msg)",
"def __init__(self, *args, **kwargs):\n _gdi_.Locale_swiginit(self,_gdi_.new_Locale(*args, **kwargs))",
"def set_language(self, lang):\n\n self.language = lang\n\n self.add_metadata('DC', 'language', lang)"
]
| [
"0.71032214",
"0.68969995",
"0.6707774",
"0.6667625",
"0.6464172",
"0.64197475",
"0.63668007",
"0.6360514",
"0.6258837",
"0.62177956",
"0.621213",
"0.6055772",
"0.60205764",
"0.59804237",
"0.5965746",
"0.59169",
"0.58849823",
"0.58773863",
"0.56971943",
"0.56550485",
"0.56550485",
"0.5617554",
"0.55354893",
"0.5525967",
"0.543302",
"0.5400332",
"0.53942955",
"0.5324694",
"0.53224015",
"0.530023"
]
| 0.79741156 | 0 |
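A hedged usage sketch for setup_locale() above. It assumes the rest of the anaconda localization module (log, constants, the langtable helpers, set_console_font) is importable and that the requested locale is installed on the system; the LangData stand-in is hypothetical — any object with a writable .lang attribute matches what the function expects of ksdata.lang:

```python
# Hypothetical stand-in for the kickstart data's language object.
class LangData:
    lang = ""

lang_data = LangData()

# Text mode triggers the console-font checks: if the locale's script cannot be
# rendered by the Linux console, $LANG falls back to the English locale, but
# lang_data.lang keeps the originally requested value (it is stored up front).
setup_locale("cs_CZ.UTF-8", lang=lang_data, text_mode=True)
print(lang_data.lang)  # -> "cs_CZ.UTF-8"
```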
Function returning the English name for the given locale. | def get_english_name(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
name = langtable.language_name(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", ""),
languageIdQuery="en")
return upcase_first_letter(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_native_name(locale):\n\n parts = parse_langcode(locale)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid locale\" % locale)\n\n name = langtable.language_name(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"),\n languageIdQuery=parts[\"language\"],\n territoryIdQuery=parts.get(\"territory\", \"\"),\n scriptIdQuery=parts.get(\"script\", \"\"))\n\n return upcase_first_letter(name)",
"def english_name(self) -> str | None:\n return self.get_display_name(Locale('en'))",
"def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval",
"def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')",
"def get_language_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.languages.get(self.language)",
"def englishName(self):\n if self.enName:\n return self.enName\n return self.name",
"def Locale_GetLanguageName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageName(*args, **kwargs)",
"def get_locale():\n return \"he\"",
"def GetLanguageName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageName(*args, **kwargs)",
"def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)",
"def Locale_GetLanguageCanonicalName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageCanonicalName(*args, **kwargs)",
"def GetLanguageCanonicalName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageCanonicalName(*args, **kwargs)",
"def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()",
"def get_territory_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.territories.get(self.territory or '')",
"def language_name(value):\n return pycountry.languages.get(alpha_2=value)",
"def get_language_name(self):\n return self.language_name",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def get_script_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.scripts.get(self.script or '')",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext",
"def get_current_locale(self) -> str:\n return self.locale",
"def get_currency_name(\n currency: str,\n count: float | decimal.Decimal | None = None,\n locale: Locale | str | None = LC_NUMERIC,\n) -> str:\n loc = Locale.parse(locale)\n if count is not None:\n try:\n plural_form = loc.plural_form(count)\n except (OverflowError, ValueError):\n plural_form = 'other'\n plural_names = loc._data['currency_names_plural']\n if currency in plural_names:\n currency_plural_names = plural_names[currency]\n if plural_form in currency_plural_names:\n return currency_plural_names[plural_form]\n if 'other' in currency_plural_names:\n return currency_plural_names['other']\n return loc.currencies.get(currency, currency)",
"def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale",
"def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'",
"def get_name_translation(self):\n\t\treturn frappe.get_value(\n\t\t\t\"Translation\",\n\t\t\t{\"source_text\": self.doc_type, \"language\": frappe.local.lang or \"en\"},\n\t\t\t[\"name\", \"translated_text\"],\n\t\t\tas_dict=True,\n\t\t)",
"def primary_name(names):\n\tlangs = names.keys()\n\tif 'en' in langs:\n\t\treturn names['en']\n\treturn names[langs[0]]",
"def __str__(self) -> str:\n locale = getattr(self, 'locale', 'en')\n return '{} <{}>'.format(\n self.__class__.__name__, locale)",
"def get_level_name(self, level_id):\n for (english_name, level_package) in self.levels[self.game]:\n if level_package.lower() == level_id.lower():\n return english_name\n return None",
"def get_locale_from_accept_header(request):\n header = request.headers.get(\"Accept-Language\", '')\n parsed = parse_accept_language_header(header)\n if parsed is None:\n return None\n locale_list_sorted_by_q = sorted(parsed.iterkeys(), reverse=True)\n locale = Locale.negotiate(locale_list_sorted_by_q, config.locales, sep='_')\n return str(locale)",
"def get_locale(self):\n return self.locale"
]
| [
"0.7876312",
"0.77180785",
"0.7477184",
"0.7409858",
"0.7364249",
"0.7325009",
"0.7026951",
"0.69422877",
"0.693985",
"0.6770859",
"0.66346914",
"0.65739375",
"0.6378897",
"0.6323452",
"0.6313075",
"0.62488353",
"0.6085183",
"0.60159516",
"0.6012822",
"0.5982879",
"0.5909848",
"0.5901809",
"0.5885161",
"0.5858813",
"0.58304787",
"0.5803825",
"0.57777",
"0.57668746",
"0.5763273",
"0.57363695"
]
| 0.7984551 | 0 |
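Illustrative calls for get_english_name() above; the returned strings come from langtable's data, so the outputs shown are examples and may vary between langtable versions:

```python
# Language name rendered in English, including the territory when present.
print(get_english_name("cs_CZ.UTF-8"))  # e.g. "Czech (Czechia)"
print(get_english_name("de_DE.UTF-8"))  # e.g. "German (Germany)"
```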
Function returning the native name for the given locale. | def get_native_name(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
name = langtable.language_name(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", ""),
languageIdQuery=parts["language"],
territoryIdQuery=parts.get("territory", ""),
scriptIdQuery=parts.get("script", ""))
return upcase_first_letter(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')",
"def get_language_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.languages.get(self.language)",
"def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval",
"def get_english_name(locale):\n\n parts = parse_langcode(locale)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid locale\" % locale)\n\n name = langtable.language_name(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"),\n languageIdQuery=\"en\")\n\n return upcase_first_letter(name)",
"def Locale_GetLanguageCanonicalName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageCanonicalName(*args, **kwargs)",
"def get_locale_name(code):\n language_map = dict(django.conf.global_settings.LANGUAGES)\n\n # check for exact match\n if code in language_map:\n return language_map[code]\n\n # try for the language, fall back to just using the code\n language = code.split(\"-\")[0]\n return language_map.get(language, code)",
"def english_name(self) -> str | None:\n return self.get_display_name(Locale('en'))",
"def get_locale():\n return \"he\"",
"def GetLanguageCanonicalName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageCanonicalName(*args, **kwargs)",
"def Locale_GetLanguageName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageName(*args, **kwargs)",
"def GetLanguageName(*args, **kwargs):\n return _gdi_.Locale_GetLanguageName(*args, **kwargs)",
"def get_script_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.scripts.get(self.script or '')",
"def get_territory_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.territories.get(self.territory or '')",
"def language_name(value):\n return pycountry.languages.get(alpha_2=value)",
"def englishName(self):\n if self.enName:\n return self.enName\n return self.name",
"def get_currency_name(\n currency: str,\n count: float | decimal.Decimal | None = None,\n locale: Locale | str | None = LC_NUMERIC,\n) -> str:\n loc = Locale.parse(locale)\n if count is not None:\n try:\n plural_form = loc.plural_form(count)\n except (OverflowError, ValueError):\n plural_form = 'other'\n plural_names = loc._data['currency_names_plural']\n if currency in plural_names:\n currency_plural_names = plural_names[currency]\n if plural_form in currency_plural_names:\n return currency_plural_names[plural_form]\n if 'other' in currency_plural_names:\n return currency_plural_names['other']\n return loc.currencies.get(currency, currency)",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def name(self, lang):\n return self.__class__.__name__",
"def get_current_locale(self) -> str:\n return self.locale",
"def get_locale(self):\n raise Unimplemented()",
"def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale",
"def get_name() -> str:",
"def getWikiLanguageName():\r\n return \"wikidpad_mini_1_0\"",
"def get_language_name(self):\n return self.language_name",
"def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()",
"def normalize_locale(loc):\n return loc.lower().replace(\"_\", \"-\")",
"def get_locale_from_accept_header(request):\n header = request.headers.get(\"Accept-Language\", '')\n parsed = parse_accept_language_header(header)\n if parsed is None:\n return None\n locale_list_sorted_by_q = sorted(parsed.iterkeys(), reverse=True)\n locale = Locale.negotiate(locale_list_sorted_by_q, config.locales, sep='_')\n return str(locale)",
"def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext",
"def get_locale():\n return babel.Locale.parse(_get_locale())",
"def month_name(self, locale: Optional[str] = None) -> Index:\n return Index(self.to_series().dt.month_name(locale))"
]
| [
"0.71125025",
"0.69137526",
"0.6904741",
"0.6725412",
"0.66430044",
"0.6561066",
"0.6556972",
"0.65130925",
"0.6500227",
"0.6475753",
"0.63275784",
"0.6256066",
"0.61634076",
"0.61133057",
"0.61010855",
"0.60682327",
"0.5996985",
"0.5977526",
"0.59572643",
"0.58402485",
"0.5826062",
"0.5731688",
"0.57296336",
"0.572604",
"0.57135683",
"0.5698401",
"0.56506574",
"0.56491363",
"0.5638808",
"0.56314474"
]
| 0.8685434 | 0 |
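The native variant differs from get_english_name() only in the *IdQuery arguments passed to langtable, so the name is rendered in the locale's own language. A sketch (outputs illustrative, taken from langtable's data):

```python
print(get_english_name("de_DE.UTF-8"))  # e.g. "German (Germany)"
print(get_native_name("de_DE.UTF-8"))   # e.g. "Deutsch (Deutschland)"
```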
Function that generates (i.e. returns a generator of) available translations for the installer in the given localedir. | def get_available_translations(localedir=None):
localedir = localedir or gettext._default_localedir
# usually there are no message files for en
messagefiles = sorted(glob.glob(localedir + "/*/LC_MESSAGES/anaconda.mo") +
["blob/en/blob/blob"])
trans_gen = (path.split(os.path.sep)[-3] for path in messagefiles)
langs = set()
for trans in trans_gen:
parts = parse_langcode(trans)
lang = parts.get("language", "")
if lang and lang not in langs:
langs.add(lang)
# check if there are any locales for the language
locales = get_language_locales(lang)
if not locales:
continue
yield lang | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def _po_packages(self):\n for name in self.distribution.po:\n source_dir = os.path.join(self.distribution.po_dir, name)\n build_dir = os.path.join(self.build_lib, name, 'locale')\n template = os.path.join(source_dir, name + '.pot')\n pkg = {'name': name,\n 'template': template,\n 'source_dir': source_dir,\n 'build_dir': build_dir}\n yield pkg",
"def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext",
"def get_locale_files(localedir):\n po_files = []\n for root, dirs, files in os.walk(localedir):\n po_files.extend(\n [os.path.join(root, fn) for fn in files\n if fn.endswith('.po')])\n\n return po_files",
"def test_localedir(self):\n self.chck_triple('localedir')",
"def translation_folder() -> Path:\n # set up the appropriate keys for each language\n keys = {\n \"en\": {\"a_key\": \"A key\", \"test_key\": \"Test key\"},\n \"fr\": {\"a_key\": \"Une clef\", \"test_key\": \"Clef de test\"},\n \"fr-FR\": {\"a_key\": \"Une clef\", \"test_key\": \"Clef de test\"},\n \"es\": {\"a_key\": \"Una llave\"},\n }\n\n with TemporaryDirectory() as tmp_dir:\n\n # create the translation files\n tmp_dir = Path(tmp_dir)\n for lan, d in keys.items():\n folder = tmp_dir / lan\n folder.mkdir()\n (folder / \"locale.json\").write_text(json.dumps(d, indent=2))\n\n yield tmp_dir\n\n return",
"def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext",
"def convert_translations(self, dest_dir):\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n total_translation_rows = 0\n with open(os.path.join(dest_dir, 'translations.txt'),\n 'w+b') as out_file:\n writer = csv.DictWriter(\n out_file, fieldnames=NEW_TRANSLATIONS_FIELDS)\n writer.writeheader()\n for filename in sorted(os.listdir(self.src_dir)):\n if not (filename.endswith('.txt') and\n os.path.isfile(os.path.join(self.src_dir, filename))):\n print('Skipping %s' % filename)\n continue\n table_name = filename[:-len('.txt')]\n if table_name == 'translations':\n continue\n total_translation_rows += self._translate_table(\n dest_dir, table_name, writer)\n print('Total translation rows: %s' % total_translation_rows)",
"def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _",
"def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))",
"def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()",
"def install_translations(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n if where == 'local':\n # if we are local, we also generate new po files\n with cd('apps/dasa/'):\n run('../../bin/django makemessages -l id')\n run('../../bin/django makemessages -l en')\n run('../../bin/django compilemessages')\n with cd('project'):\n# run('../bin/django makemessages -l id')\n run('../bin/django makemessages -l en')\n run('../bin/django compilemessages')\n else: # otherwise, we just compile\n run('git pull')\n with cd('apps/dasa/'):\n run('../../bin/django compilemessages')\n with cd('project'):\n run('../bin/django compilemessages')\n restart(where)",
"def main(verbosity=1):\r\n SOURCE_MSGS_DIR = CONFIGURATION.source_messages_dir\r\n for locale, converter in zip(CONFIGURATION.dummy_locales, [Dummy(), Dummy2()]):\r\n if verbosity:\r\n print('Processing source language files into dummy strings, locale \"{}\"'.format(locale))\r\n for source_file in CONFIGURATION.source_messages_dir.walkfiles('*.po'):\r\n if verbosity:\r\n print(' ', source_file.relpath())\r\n make_dummy(SOURCE_MSGS_DIR.joinpath(source_file), locale, converter)\r\n if verbosity:\r\n print()",
"def __initializeLocale(self):\n langdomain = 'tortugaStrings'\n\n # Locate the Internationalization stuff\n localedir = '../share/locale' \\\n if os.path.exists('../share/locale') else \\\n os.path.join(self._cm.getRoot(), 'share/locale')\n\n gettext.install(langdomain, localedir)",
"def get_messages_dir(self, locale):\r\n return LOCALE_DIR.joinpath(locale, 'LC_MESSAGES')",
"def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []",
"def i18ninit():\n click.echo('-> Initializing i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang, command='init')\n click.echo('-> i18n message files initialized.')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))",
"def test_available_locales(translation_folder: Path) -> None:\n # expected grid\n res = [\"es\", \"fr\", \"fr-FR\", \"en\"]\n\n # create the translator\n # -en- to -en-\n translator = Translator(translation_folder)\n\n for locale in res:\n assert locale in translator.available_locales()\n\n # Check no hidden and protected files are in locales\n locales = translator.available_locales()\n assert not all([(loc.startswith(\".\") or loc.startswith(\"_\")) for loc in locales])\n\n return",
"def main(verbosity=1):\r\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\r\n LOCALE_DIR.parent.makedirs_p()\r\n source_msgs_dir = CONFIGURATION.source_messages_dir\r\n remove_file(source_msgs_dir.joinpath('django.po'))\r\n\r\n # Extract strings from mako templates.\r\n verbosity_map = {\r\n 0: \"-q\",\r\n 1: \"\",\r\n 2: \"-v\",\r\n }\r\n babel_verbosity = verbosity_map.get(verbosity, \"\")\r\n\r\n babel_mako_cmd = 'pybabel {verbosity} extract -F {config} -c \"Translators:\" . -o {output}'\r\n babel_mako_cmd = babel_mako_cmd.format(\r\n verbosity=babel_verbosity,\r\n config=base(LOCALE_DIR, 'babel_mako.cfg'),\r\n output=base(CONFIGURATION.source_messages_dir, 'mako.po'),\r\n )\r\n if verbosity:\r\n stderr = None\r\n else:\r\n stderr = DEVNULL\r\n\r\n execute(babel_mako_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n makemessages = \"django-admin.py makemessages -l en -v{}\".format(verbosity)\r\n ignores = \" \".join('--ignore=\"{}/*\"'.format(d) for d in CONFIGURATION.ignore_dirs)\r\n if ignores:\r\n makemessages += \" \" + ignores\r\n\r\n # Extract strings from django source files, including .py files.\r\n make_django_cmd = makemessages + ' --extension html'\r\n execute(make_django_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n # Extract strings from Javascript source files.\r\n make_djangojs_cmd = makemessages + ' -d djangojs --extension js'\r\n execute(make_djangojs_cmd, working_directory=BASE_DIR, stderr=stderr)\r\n\r\n # makemessages creates 'django.po'. This filename is hardcoded.\r\n # Rename it to django-partial.po to enable merging into django.po later.\r\n os.rename(\r\n source_msgs_dir.joinpath('django.po'),\r\n source_msgs_dir.joinpath('django-partial.po')\r\n )\r\n\r\n # makemessages creates 'djangojs.po'. This filename is hardcoded.\r\n # Rename it to djangojs-partial.po to enable merging into djangojs.po later.\r\n os.rename(\r\n source_msgs_dir.joinpath('djangojs.po'),\r\n source_msgs_dir.joinpath('djangojs-partial.po')\r\n )\r\n\r\n files_to_clean = set()\r\n\r\n # Extract strings from third-party applications.\r\n for app_name in CONFIGURATION.third_party:\r\n # Import the app to find out where it is. Then use pybabel to extract\r\n # from that directory.\r\n app_module = importlib.import_module(app_name)\r\n app_dir = path(app_module.__file__).dirname().dirname()\r\n output_file = source_msgs_dir / (app_name + \".po\")\r\n files_to_clean.add(output_file)\r\n\r\n babel_cmd = 'pybabel {verbosity} extract -F {config} -c \"Translators:\" {app} -o {output}'\r\n babel_cmd = babel_cmd.format(\r\n verbosity=babel_verbosity,\r\n config=LOCALE_DIR / 'babel_third_party.cfg',\r\n app=app_name,\r\n output=output_file,\r\n )\r\n execute(babel_cmd, working_directory=app_dir, stderr=stderr)\r\n\r\n # Segment the generated files.\r\n segmented_files = segment_pofiles(\"en\")\r\n files_to_clean.update(segmented_files)\r\n\r\n # Finish each file.\r\n for filename in files_to_clean:\r\n LOG.info('Cleaning %s' % filename)\r\n po = pofile(source_msgs_dir.joinpath(filename))\r\n # replace default headers with edX headers\r\n fix_header(po)\r\n # replace default metadata with edX metadata\r\n fix_metadata(po)\r\n # remove key strings which belong in messages.po\r\n strip_key_strings(po)\r\n po.save()",
"def main():\n actual_dir = os.getcwd()\n i18n_dir = os.path.join(actual_dir, 'i18n') # Directory of I18n app.\n i18n_dirname = os.path.basename(i18n_dir)\n models_file = os.path.join(i18n_dir, 'models.py')\n data_dir = os.path.join(i18n_dir, 'data') # CSV files.\n data_license = os.path.join(data_dir, 'LICENSE_CC')\n project_dir = os.path.dirname(i18n_dir)\n settings_file = os.path.join(project_dir, 'settings.py')\n\n show_license(data_license)\n i18n_model = setup_environ(project_dir, i18n_dirname, settings_file)\n models = get_data_models(models_file)\n new_models = sort_models(data_dir, models)\n for model in new_models:\n load_data(model, i18n_model, i18n_dirname)",
"def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }",
"def main(locales=None, verbosity=1): # pylint: disable=unused-argument\r\n # This is used as a tool only to segment translation files when adding a\r\n # new segment. In the regular workflow, the work is done by the extract\r\n # phase calling the functions above.\r\n locales = locales or []\r\n for locale in locales:\r\n segment_pofiles(locale)",
"def main(languages=None, empty=False, verbosity=1): # pylint: disable=unused-argument\r\n languages = languages or []\r\n\r\n if not languages:\r\n root = LOCALE_DIR\r\n validate_po_files(root, empty)\r\n return\r\n\r\n # languages will be a list of language codes; test each language.\r\n for language in languages:\r\n root = LOCALE_DIR / language\r\n # Assert that a directory for this language code exists on the system\r\n if not root.isdir():\r\n log.error(\" {0} is not a valid directory.\\nSkipping language '{1}'\".format(root, language))\r\n continue\r\n # If we found the language code's directory, validate the files.\r\n validate_po_files(root, empty)",
"def runRebuildmo():\n cwd = os.getcwd()\n import sys\n path = list(sys.path)\n languages = []\n try:\n import imp\n scriptPath = os.path.dirname(__file__)\n modulePath = os.path.join(cwd, scriptPath, REBUILDMO_DIR)\n sys.path += [modulePath, '.', cwd]\n modInfo = imp.find_module(REBUILDMO_NAME, [modulePath, '.', cwd])\n rebuildmo = imp.load_module('rebuildmo', *modInfo)\n os.chdir(modulePath)\n languages = rebuildmo.rebuildmo()\n print 'Created locale for: %s.' % ' '.join(languages)\n except Exception, e:\n print 'ERROR: unable to rebuild .mo files; caught exception %s' % e\n sys.path = path\n os.chdir(cwd)\n return languages",
"def __loadTranslator(self):\n if self.__ui is not None:\n loc = self.__ui.getLocale()\n if loc and loc != \"C\":\n locale_dir = os.path.join(\n os.path.dirname(__file__), \"ProjectDjango\", \"i18n\")\n translation = \"django_{0}\".format(loc)\n translator = QTranslator(None)\n loaded = translator.load(translation, locale_dir)\n if loaded:\n self.__translator = translator\n e5App().installTranslator(self.__translator)\n else:\n print(\"Warning: translation file '{0}' could not be\"\n \" loaded.\".format(translation))\n print(\"Using default.\")",
"def includeme(config):\n\n import patches\n config.add_translation_dirs('kotti_multilingual:locale')\n config.scan(__name__)",
"def _po_package_contents(self, package):\n po_files = glob(os.path.join(package['source_dir'], '*.po'))\n for po_file in po_files:\n language = os.path.splitext(os.path.basename(po_file))[0]\n lang_dir = os.path.join(package['build_dir'], language)\n msg_dir = os.path.join(lang_dir, 'LC_MESSAGES')\n mo_file = os.path.join(msg_dir, package['name'] + '.mo')\n yield {'language': language,\n 'lang_dir': lang_dir,\n 'msg_dir': msg_dir,\n 'mo_file': mo_file,\n 'po_file': po_file}",
"def localeborg_base():\n LocaleBorg.reset()\n assert not LocaleBorg.initialized\n LocaleBorg.initialize({}, \"en\")\n assert LocaleBorg.initialized\n assert LocaleBorg().current_lang == \"en\"\n try:\n yield\n finally:\n LocaleBorg.reset()\n assert not LocaleBorg.initialized",
"def add_support_for_localization():\n path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)\n possible_topdir = os.path.normpath(path)\n if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):\n sys.path.insert(0, possible_topdir)\n\n gettext.install('nova', unicode=1)",
"def _extract_18n_messages():\n BabelCLI().run(['', 'extract', '-F', 'babel.cfg', '-k', '_t', '--no-location', '--sort-output',\n '--omit-header', '-o', os.path.join(I18N_PATH, 'messages.pot'), 'aliquis'])"
]
| [
"0.5978555",
"0.5838234",
"0.5805316",
"0.57504076",
"0.57090265",
"0.5661206",
"0.5586652",
"0.55807585",
"0.5530141",
"0.5519474",
"0.5516491",
"0.5417705",
"0.5306534",
"0.5302171",
"0.5248377",
"0.5206404",
"0.51667666",
"0.5164295",
"0.5140475",
"0.5131287",
"0.5125426",
"0.5098185",
"0.5077674",
"0.5075748",
"0.5075663",
"0.50684243",
"0.506762",
"0.5064058",
"0.5050132",
"0.5040381"
]
| 0.6989117 | 0 |
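Because get_available_translations() is a generator yielding language codes (one per language, not per locale), callers typically materialize it; the directory below is gettext's usual default and an assumption for this sketch:

```python
# One code per language that ships an anaconda.mo (plus the implicit "en").
langs = sorted(get_available_translations("/usr/share/locale"))
print(langs)  # e.g. ['cs', 'de', 'en', 'fr', ...]
```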
Function returning the list of locales for the given territory. The list is sorted from the most probable locale to the least probable one (based on langtable's ranking). | def get_territory_locales(territory):
return langtable.list_locales(territoryId=territory) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_locales(self) -> List[str]:\n\n return self.possible_locale_list",
"def translated_locales(self):\r\n return sorted(set(self.locales) - set([self.source_locale]))",
"def sorted_languages():\n # Python 3: Use functools.cmp_to_key\n def compare(a, b):\n if a.name == u\"English\":\n return -1\n elif b.name == u\"English\":\n return 1\n else:\n return cmp(a, b)\n return sorted(Language.query.all(), cmp=compare)",
"def get_language_locales(lang):\n\n parts = parse_langcode(lang)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid language\" % lang)\n\n return langtable.list_locales(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"))",
"def languages():\n return \", \".join(sorted(\"{}: '{}'\".format(gTTS.LANGUAGES[k], k) for k in gTTS.LANGUAGES))",
"def getLocales(self):\n pass",
"def allLocales(self):\n return util.parseLocales(urlopen(self.all_url).read())",
"def get_sorted():\n return sorted(country_list, key=get_pop_and_name)",
"def GetLanguages(cls):\n return sorted(cls._LANGUAGE_PER_TAG.items())",
"def released_languages_list(self):\r\n if not self.released_languages.strip(): # pylint: disable=no-member\r\n return []\r\n\r\n languages = [lang.strip() for lang in self.released_languages.split(',')] # pylint: disable=no-member\r\n # Put in alphabetical order\r\n languages.sort()\r\n return languages",
"def find_languages(self, obj):\n languages = set()\n\n def add_langs(item):\n if hasattr(item, 'keys'): # pragma: nocover\n languages.update(item.keys())\n\n for browser in obj.all_browsers:\n add_langs(browser.name)\n add_langs(browser.note)\n\n for feature in chain([obj], obj.child_features):\n add_langs(feature.mdn_uri)\n add_langs(feature.name)\n\n for maturity in obj.all_maturities:\n add_langs(maturity.name)\n\n for reference in obj.all_references:\n add_langs(reference.note)\n\n for section in obj.all_sections:\n add_langs(section.number)\n add_langs(section.name)\n add_langs(section.subpath)\n\n for spec in obj.all_specs:\n add_langs(spec.name)\n add_langs(spec.uri)\n\n for support in obj.all_supports:\n add_langs(support.note)\n\n for version in obj.all_versions:\n add_langs(version.release_notes_uri)\n add_langs(version.note)\n\n if 'zxx' in languages:\n # No linguistic content\n languages.remove('zxx')\n if 'en' in languages:\n languages.remove('en')\n return ['en'] + sorted(languages)\n else:\n return sorted(languages)",
"def get_available_translations(localedir=None):\n\n localedir = localedir or gettext._default_localedir\n\n # usually there are no message files for en\n messagefiles = sorted(glob.glob(localedir + \"/*/LC_MESSAGES/anaconda.mo\") +\n [\"blob/en/blob/blob\"])\n trans_gen = (path.split(os.path.sep)[-3] for path in messagefiles)\n\n langs = set()\n\n for trans in trans_gen:\n parts = parse_langcode(trans)\n lang = parts.get(\"language\", \"\")\n if lang and lang not in langs:\n langs.add(lang)\n # check if there are any locales for the language\n locales = get_language_locales(lang)\n if not locales:\n continue\n\n yield lang",
"def getPossibleLangs(self):\n lst = {}\n for e in self._values:\n for lang in e.getLangCodes():\n lst[ lang ] = 1\n return lst.keys()",
"def get_available_languages(domain):\r\n if domain in _AVAILABLE_LANGUAGES:\r\n return copy.copy(_AVAILABLE_LANGUAGES[domain])\r\n\r\n localedir = '%s_LOCALEDIR' % domain.upper()\r\n find = lambda x: gettext.find(domain,\r\n localedir=os.environ.get(localedir),\r\n languages=[x])\r\n\r\n # NOTE(mrodden): en_US should always be available (and first in case\r\n # order matters) since our in-line message strings are en_US\r\n language_list = ['en_US']\r\n # NOTE(luisg): Babel <1.0 used a function called list(), which was\r\n # renamed to locale_identifiers() in >=1.0, the requirements master list\r\n # requires >=0.9.6, uncapped, so defensively work with both. We can remove\r\n # this check when the master list updates to >=1.0, and all projects udpate\r\n list_identifiers = (getattr(localedata, 'list', None) or\r\n getattr(localedata, 'locale_identifiers'))\r\n locale_identifiers = list_identifiers()\r\n for i in locale_identifiers:\r\n if find(i) is not None:\r\n language_list.append(i)\r\n _AVAILABLE_LANGUAGES[domain] = language_list\r\n return copy.copy(language_list)",
"def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks",
"def sorted_countries():\n ahh = [(country, COUNTRY_DATA[country]['data'].deaths[-1]) for country in COUNTRY_DATA.keys()]\n sorted_countries = sorted(ahh, key=lambda x: x[1], reverse=True)\n return [data[0] for data in sorted_countries]",
"def get_all_languages():\n\tdef _get():\n\t\tif not frappe.db:\n\t\t\tfrappe.connect()\n\t\treturn frappe.db.sql_list('select name from tabLanguage')\n\treturn frappe.cache().get_value('languages', _get)",
"def get_available_locales(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GetAvailableLocales/\"))",
"def probable_languages(\n self,\n text: str,\n max_languages: int = 3) -> Tuple[str, ...]:\n scores = self.scores(text)\n\n # Sorted from the most probable language to the least probable\n sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)\n languages, probabilities = list(zip(*sorted_scores))\n\n # Find the most distant consecutive languages.\n # A logarithmic scale is used here because the probabilities\n # are most of the time really close to zero\n rescaled_probabilities = [log(proba) for proba in probabilities]\n distances = [\n rescaled_probabilities[pos] - rescaled_probabilities[pos+1]\n for pos in range(len(rescaled_probabilities)-1)]\n\n max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]\n limit = min(max_distance_pos, max_languages)\n return languages[:limit]",
"def reduce_strings(description, locale=None):\n if locale is None:\n locales = {}\n\n if isinstance(description.locales, dict):\n for key, value in description.locales.items():\n locales[key] = reduce_strings(description, key)\n\n return locales\n\n locales = []\n\n if isinstance(description.locales, dict):\n if locale in description.locales.keys():\n locales.append({\n \"key\": description.id,\n \"value\": description.locales[locale]\n })\n\n if isinstance(description.value, list):\n for item in description.value:\n locales = locales + list(\n filter(None, reduce_strings(item, locale))\n )\n\n return locales",
"def load_pagel_ranking():\n ranking = []\n fp = open(\"Pagel-2007-200.tsv\", \"r\")\n r = csv.DictReader(fp, delimiter=\"\\t\")\n for row in r:\n word = row[\"ENGLISH\"].lower()\n if \"(\" in word:\n word = word.split(\"(\")[0].strip()\n if word.startswith(\"to \"):\n word = word.split(\" \",1)[1]\n rank = float(row[\"MEAN_RATE\"])\n ranking.append((rank, word))\n fp.close()\n ranking.sort()\n return [w for (n,w) in ranking]",
"def report_categories():\n return list(sorted(set([rt.category for rt in report_types()])))",
"def get_locale_timezones(locale):\n\n parts = parse_langcode(locale)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid locale\" % locale)\n\n return langtable.list_timezones(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"))",
"def get_all_rankings(session: CondorSession) -> List[sc.Ranking]:\n return [sc.Ranking(matrix) for matrix in RankingMatrix.list(session)]",
"def find_best_locale_match(locale, langcodes):\n\n score_map = { \"language\" : 1000,\n \"territory\": 100,\n \"script\" : 10,\n \"encoding\" : 1 }\n\n def get_match_score(locale, langcode):\n score = 0\n\n locale_parts = parse_langcode(locale)\n langcode_parts = parse_langcode(langcode)\n if not locale_parts or not langcode_parts:\n return score\n\n for part, part_score in score_map.iteritems():\n if locale_parts[part] and langcode_parts[part]:\n if locale_parts[part] == langcode_parts[part]:\n # match\n score += part_score\n else:\n # not match\n score -= part_score\n elif langcode_parts[part] and not locale_parts[part]:\n # langcode has something the locale doesn't have\n score -= part_score\n\n return score\n\n scores = []\n\n # get score for each langcode\n for langcode in langcodes:\n scores.append((langcode, get_match_score(locale, langcode)))\n\n # find the best one\n sorted_langcodes = sorted(scores, key=lambda item_score: item_score[1], reverse=True)\n\n # matches matching only script or encoding or both are not useful\n if sorted_langcodes and sorted_langcodes[0][1] > score_map[\"territory\"]:\n return sorted_langcodes[0][0]\n else:\n return None",
"def find_possible_languages(string, allowed_languages=None):\n\n common_words = None\n if allowed_languages:\n common_words = LNG_COMMON_WORDS_STRICT\n else:\n common_words = LNG_COMMON_WORDS\n\n words = find_words(string)\n\n valid_words = []\n for word in words:\n lang_word = word.lower()\n key = 'language'\n for prefix in subtitle_prefixes:\n if lang_word.startswith(prefix):\n lang_word = lang_word[len(prefix):]\n key = 'subtitleLanguage'\n for suffix in subtitle_suffixes:\n if lang_word.endswith(suffix):\n lang_word = lang_word[:len(suffix)]\n key = 'subtitleLanguage'\n for prefix in lang_prefixes:\n if lang_word.startswith(prefix):\n lang_word = lang_word[len(prefix):]\n if lang_word not in common_words:\n try:\n lang = Language.fromguessit(lang_word)\n if allowed_languages:\n if lang.name.lower() in allowed_languages or lang.alpha2.lower() in allowed_languages or lang.alpha3.lower() in allowed_languages:\n valid_words.append((key, lang, lang_word, word))\n # Keep language with alpha2 equivalent. Others are probably\n # uncommon languages.\n elif lang == 'mul' or hasattr(lang, 'alpha2'):\n valid_words.append((key, lang, lang_word, word))\n except babelfish.Error:\n pass\n return valid_words",
"def languages_from_gsx_entry(\n resource: \"Resource\", entry: Dict[str, Dict[str, str]]\n ) -> Optional[List[Optional[ControlledTerm]]]:\n if not resource or not entry:\n return None\n\n names = get_gsx_entry_value(entry, \"language\")\n if not names:\n return None\n\n languages = []\n\n for name in names.split(\"; \"):\n name = name.split(\" [\")[0]\n term = search_term_or_none(\"iso639-2\", name)\n if term:\n languages.append(term)\n ResourceLanguage.objects.get_or_create(resource=resource, language=term)\n\n return languages",
"def list_currencies(locale: Locale | str | None = None) -> set[str]:\n # Get locale-scoped currencies.\n if locale:\n return set(Locale.parse(locale).currencies)\n return set(get_global('all_currencies'))",
"def list_of_langs(data):\n lang_codes = []\n for lang_data in data:\n lang_codes.append(lang_data.get('value'))\n return lang_codes",
"def get_languages(self):\n language_list = []\n url = '%s%s/languages.xml' % (self.URL_API, self.API_KEY)\n data = urllib.urlopen(url)\n root = cElementTree.parse(data).getroot()\n for language in root.iter('Language'):\n language_list.append(language.find('abbreviation').text)\n return language_list"
]
| [
"0.6352298",
"0.60188526",
"0.5842274",
"0.57512105",
"0.5664816",
"0.55907327",
"0.55778724",
"0.5497595",
"0.54512554",
"0.5121554",
"0.5081006",
"0.50800115",
"0.5067049",
"0.50165933",
"0.50112844",
"0.49681905",
"0.49145204",
"0.49050337",
"0.48998576",
"0.4832239",
"0.47911856",
"0.47895506",
"0.4764042",
"0.4748112",
"0.47477958",
"0.47442538",
"0.47433847",
"0.47141075",
"0.46766305",
"0.46598953"
]
| 0.6169706 | 1 |
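A sketch for get_territory_locales(); langtable orders the result from the most to the least probable locale, which matters for multilingual territories (outputs illustrative):

```python
locales = get_territory_locales("CH")  # Switzerland: German/French/Italian/Romansh
print(locales[0])   # most probable candidate first, e.g. "de_CH.UTF-8"
print(locales[1:])  # the rest, in decreasing probability
```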
Function returning preferred keyboard layouts for the given locale. | def get_locale_keyboards(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
return langtable.list_keyboards(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", "")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_layout(keymap_c: str):\n lines = get_keymap(keymap_c)\n lines = remove_excess_white_space(lines)\n layout_strs = get_layouts_strs(lines)\n return get_layout_keys(layout_strs)",
"def get_available_layouts(self):\n\n return self._layout_infos.iterkeys()",
"def get_keyboard(self, name: str, locale: str) -> list:\n buttons = copy.deepcopy(self.__locales[locale]['keyboards']['arrangements'][name])\n for i in range(len(buttons)):\n for j in range(len(buttons[i])):\n buttons[i][j] = self.get_keyboard_button(buttons[i][j], locale)\n return buttons",
"def layout_method_mapper(self):\n return {\n \"kamada_kawai_layout\": kamada_kawai_layout,\n \"fruchterman_reingold_layout\": fruchterman_reingold_layout,\n \"spectral_layout\": spectral_layout,\n }",
"def layout_human(lang, name):\n kn = _get_keyboard_names()\n return kn.layout_human(lang, name)",
"def get_layout_names(base_url=DEFAULT_BASE_URL):\n res = commands.cyrest_get('apply/layouts', base_url=base_url)\n return res",
"def get_layout_name_mapping(base_url=DEFAULT_BASE_URL):\n layout_names = get_layout_names(base_url=base_url)\n layout_mapping = {}\n\n # get the full name of a layout and create {fullname:layoutname} in dictionary\n for layout_name in layout_names:\n res = commands.cyrest_get('apply/layouts/' + layout_name, base_url=base_url)\n layout_mapping.update({res['longName']: layout_name})\n\n return layout_mapping",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def get_keymap(lines: str):\n parsed = [line for line in lines.split(\"\\n\") if line]\n\n start = end = 0\n for i, line in enumerate(parsed):\n if \"qmkformat start\" in line:\n start = i + 1\n if \"qmkformat end\" in line:\n end = i\n break\n\n layout = \"\".join(parsed[start:end])\n return layout[layout.find(\"{\") + 1 : layout.find(\"}\")]",
"def _get_keyboard_names():\n global _keyboard_names\n if _keyboard_names is None:\n _keyboard_names = KeyboardNames()\n return _keyboard_names",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def get_current_layout(self):\n # ported from the widgets/src/LayoutIndicator.c code\n\n self._engine.start_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)\n state = self._engine.get_current_state()\n cur_group = state.group\n num_groups = self._engine.get_num_groups()\n\n # BUG?: if the last layout in the list is activated and removed,\n # state.group may be equal to n_groups\n if cur_group >= num_groups:\n cur_group = num_groups - 1\n\n # pylint: disable=unsubscriptable-object\n layout = self._rec.layouts[cur_group]\n try:\n # pylint: disable=unsubscriptable-object\n variant = self._rec.variants[cur_group]\n except IndexError:\n # X server may have forgotten to add the \"\" variant for its default layout\n variant = \"\"\n\n self._engine.stop_listen(Xkl.EngineListenModes.TRACK_KEYBOARD_STATE)\n\n return join_layout_variant(layout, variant)",
"def create_preferred_backend_map():\n preferred_backend_map = {}\n for mapping in config.preferred_backend_map:\n key, value = list(mapping.items())[0]\n preferred_backend_map[key] = value\n return preferred_backend_map",
"def swing_modes(self):\n return list(SWING_MODE_TO_DPS_MODE.keys())",
"def get_default():\n return build_key_map({\n \"LEFT\" : \"move-left\",\n \"RIGHT\" : \"move-right\",\n \"UP\" : \"move-up\",\n \"DOWN\" : \"move-down\",\n \"S-LEFT\" : \"scroll-left\",\n \"S-RIGHT\" : \"scroll-right\",\n\n \"C-b\" : \"move-left\", # back\n \"C-f\" : \"move-right\", # forward\n \"C-k\" : \"delete-row\",\n \"C-p\" : \"move-up\", # previous\n \"C-n\" : \"move-down\", # next\n \"M-v\" : \"move-up-page\",\n \"C-v\" : \"move-down-page\",\n \"C-x\" : PREFIX,\n (\"C-x\", \"C-s\") : \"save\",\n (\"C-x\", \"C-w\") : \"save-as\",\n \"C-z\" : \"undo\",\n\n \";\" : \"decrease-column-width\",\n \"'\" : \"increase-column-width\",\n \":\" : \"decrease-column-precision\",\n \"\\\"\" : \"increase-column-precision\",\n\n \"M-#\" : \"toggle-show-row-num\",\n \"M-$\" : \"hide-column\",\n \"M-x\" : \"command\",\n\n \"q\" : \"quit\",\n })",
"def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale",
"def find_double_layouts(self):\n\n # create a new game, dict for layouts and key pairs list\n game = Game(self.sourcefile)\n layout_dict = {}\n key_pairs = []\n \n # save layout per move\n for i in range(len(self.moves_set)):\n game.move(self.moves_set[i])\n game.board.create_layout()\n layout = game.board.layout\n\n # if layout is already in dict, save keys as key pair\n for key in layout_dict:\n if layout.tobytes() == layout_dict[key].tobytes():\n key_pairs.append([key, i])\n\n layout_dict[i] = layout\n \n return key_pairs",
"def keys(self):\n if self.widget:\n w_keys = self.widget.keys()\n else:\n w_keys = []\n return sorted(w_keys + list(ttfont_dict_keys) + [case_s])",
"def FetchLayoutsData(client):\n layout_names = ['U_layout', 'J_layout', 'E_layout', 'B_layout']\n cols = ['scancode', 'x', 'y', 'w', 'h']\n layouts = FetchSpreadsheetFeeds(client, KEYBOARD_GLYPH_SPREADSHEET_KEY,\n layout_names, cols)\n ret = {}\n for layout_name, layout in layouts.items():\n ret[layout_name[0]] = []\n for row in layout:\n line = []\n for col in cols:\n value = row.get(col)\n if not value:\n line.append('')\n else:\n if col != 'scancode':\n value = float(value)\n line.append(value)\n ret[layout_name[0]].append(line)\n return ret",
"def sp_keyboard(language_code):\n keyboard = [\n [\n make_button(buttons.SP_KOTLIN, language_code),\n make_button(buttons.SP_WEB, language_code),\n ],\n [\n make_button(buttons.SP_ANDROID, language_code),\n make_button(buttons.SP_IOS, language_code),\n ],\n [\n make_button(buttons.SP_CPP, language_code),\n make_button(buttons.SP_ALL, language_code),\n ],\n [\n make_button(buttons.EXIT_PARAMETERS, language_code),\n make_button(buttons.COURSES_RETURN, language_code),\n ],\n ]\n return InlineKeyboardMarkup(inline_keyboard=keyboard)",
"def eng2_keyboard(language_code):\n keyboard = [\n [\n make_button(buttons.ENG_C2_1, language_code),\n make_button(buttons.ENG_C2_2, language_code),\n make_button(buttons.ENG_C2_3, language_code),\n make_button(buttons.ENG_C1_1, language_code),\n make_button(buttons.ENG_C1_2, language_code),\n ],\n [\n make_button(buttons.ENG_B2_3, language_code),\n make_button(buttons.ENG_ALL, language_code),\n make_button(buttons.ENG_PREV, language_code),\n ],\n [\n make_button(buttons.EXIT_PARAMETERS, language_code),\n make_button(buttons.COURSES_RETURN, language_code),\n ],\n ]\n return InlineKeyboardMarkup(inline_keyboard=keyboard)",
"def FetchKeyboardGlyphData(client):\n glyph_cols = ['scancode', 'p0', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7',\n 'p8', 'p9', 'label', 'format', 'notes']\n keyboard_glyph_data = FetchSpreadsheetFeeds(\n client, KEYBOARD_GLYPH_SPREADSHEET_KEY,\n INPUT_METHOD_ID_TO_OVERLAY_ID.values(), glyph_cols)\n ret = {}\n for lang in keyboard_glyph_data:\n ret[lang] = {}\n keys = {}\n for line in keyboard_glyph_data[lang]:\n scancode = line.get('scancode')\n if (not scancode) and line.get('notes'):\n ret[lang]['layoutName'] = line['notes']\n continue\n del line['scancode']\n if 'notes' in line:\n del line['notes']\n if 'label' in line:\n line['label'] = LABEL_MAP.get(line['label'], line['label'])\n keys[scancode] = line\n # Add a label to space key\n if '39' not in keys:\n keys['39'] = {'label': 'space'}\n ret[lang]['keys'] = keys\n return ret",
"def getLocales(self):\n pass",
"def swing_modes(self) -> list[str]:\n return self._swing_modes",
"def get_locale_console_fonts(locale):\n\n parts = parse_langcode(locale)\n if \"language\" not in parts:\n raise InvalidLocaleSpec(\"'%s' is not a valid locale\" % locale)\n\n return langtable.list_consolefonts(languageId=parts[\"language\"],\n territoryId=parts.get(\"territory\", \"\"),\n scriptId=parts.get(\"script\", \"\"))",
"def eng1_keyboard(language_code):\n keyboard = [\n [\n make_button(buttons.ENG_B11_1, language_code),\n make_button(buttons.ENG_B11_2, language_code),\n make_button(buttons.ENG_B12_1, language_code),\n make_button(buttons.ENG_B12_2, language_code),\n ],\n [\n make_button(buttons.ENG_B2_1, language_code),\n make_button(buttons.ENG_B2_2, language_code),\n make_button(buttons.ENG_NEXT, language_code),\n ],\n [\n make_button(buttons.EXIT_PARAMETERS, language_code),\n make_button(buttons.COURSES_RETURN, language_code),\n ],\n ]\n return InlineKeyboardMarkup(inline_keyboard=keyboard)",
"def bot_locales(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotLocaleArgs']]]]:\n return pulumi.get(self, \"bot_locales\")",
"def get_klayout_folder_path():\n print(\"Finding KLayout folder...\")\n system = platform.system()\n if system == 'Windows':\n for root, dirs, files in os.walk(\"C:\"+os.sep+\"Users\"):\n for file in files:\n if root.split(os.sep)[-1] == \"KLayout\" and file == \"klayoutrc\":\n print(\"KLAYOUT FOLDER:\")\n print(root + \"\\n\")\n return root\n \n elif system == \"Darwin\":\n for root, dirs, files in os.walk(\"/Users\"):\n if root.split(os.sep)[-1] == \".klayout\":\n print(\"KLAYOUT FOLDER:\")\n print(root + \"\\n\")\n return root\n print(\"Could not find KLayout folde'r...\\n\")",
"def get_klayout_app_path():\n print(\"Finding KLayout application...\")\n system = platform.system()\n if system == 'Windows':\n for root, dirs, files in os.walk(\"C:\"+os.sep):\n for file in files:\n if file.endswith(\".exe\") and \"klayout_app\" in file:\n print(\"KLAYOUT APPLICATION PATH:\")\n print(os.path.join(root, file) + \"\\n\")\n return os.path.join(root, file)\n elif system == \"Darwin\":\n location = os.popen(\"find /Applications -name klayout.app\").read()\n if location:\n print(\"KLAYOUT APPLICATION LOCATION:\")\n print(location + \"\\n\")\n return location\n print(\"Could not find KLayout app...\\n\")",
"def get_language_code_coding_and_locale():\n # type: () -> Tuple[str, str, str]\n try:\n language_code, encoding = locale.getdefaultlocale()\n if language_code and encoding:\n used_locale = \".\".join([language_code, encoding])\n else:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale\"\n except Exception as e:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale: %s\" % (str(e))\n\n return language_code, encoding, used_locale"
]
| [
"0.60228944",
"0.5637982",
"0.55843544",
"0.5231292",
"0.5200092",
"0.51659167",
"0.4992071",
"0.4957695",
"0.4894653",
"0.48928568",
"0.48863006",
"0.47528282",
"0.47521296",
"0.4738308",
"0.46856064",
"0.46499667",
"0.46469063",
"0.46232203",
"0.46160245",
"0.4595352",
"0.45950368",
"0.45880353",
"0.4582134",
"0.45796087",
"0.45723894",
"0.45665848",
"0.45583767",
"0.45491087",
"0.44941473",
"0.44899487"
]
| 0.6866152 | 0 |
Function returning preferred timezones for the given locale. | def get_locale_timezones(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
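# langtable lists timezones in order of relevance, e.g. "Europe/Berlin"
# first for a de_DE locale (illustrative output, not guaranteed ordering).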
return langtable.list_timezones(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", "")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_timezone_list():\n return pytz.country_timezones('US')",
"def Timezones():\n return sorted(list(PytzCache._zmap.values()))",
"def get_timezones() -> set[str]:\n return available_timezones() - UNAVAILABLE_TIMEZONES",
"def time_zones(self) -> localedata.LocaleDataDict:\n return self._data['time_zones']",
"def display_tzname(self):\n return settings.TIME_ZONES_BY_LANG.get(self.language, settings.TIME_ZONE)",
"def _map_timezones():\n tz_map = {}\n todo = HAYSTACK_TIMEZONES_SET.copy()\n for full_tz in pytz.all_timezones:\n # Finished case:\n if not bool(todo): # pragma: no cover\n # This is nearly impossible for us to cover, and an unlikely case.\n break\n\n # Case 1: exact match\n if full_tz in todo:\n tz_map[full_tz] = full_tz # Exact match\n todo.discard(full_tz)\n continue\n\n # Case 2: suffix match after '/'\n if '/' not in full_tz:\n continue\n\n (prefix, suffix) = full_tz.split('/',1)\n # Case 2 exception: full timezone contains more than one '/' -> ignore\n if '/' in suffix:\n continue\n\n if suffix in todo:\n tz_map[suffix] = full_tz\n todo.discard(suffix)\n continue\n\n return tz_map",
"def associate_timezones_to_countries(self):\n\t\t\n\t\tresult = {}\n\t\twith open(\"/usr/share/zoneinfo/zone.tab\", \"r\") as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tif line[0] == \"#\": continue\n\t\t\t\t\n\t\t\t\tline = line.replace(\"\\n\",\"\").split(\"\\t\")\n\t\t\t\tif not line[0] in result: result[line[0]] = line[2]\n\t\t\n\t\treturn result",
"def test_all_time_zones_choices(self):\n # Obtain a timezone that is in pytz.all_timezones, but not in pytz.common_timezones\n timezones = set(pytz.all_timezones) - set(pytz.common_timezones)\n timezone = timezones.pop()\n\n choices = {\n choice[0]\n for choice in TimeZoneField.get_all_choices()\n }\n\n self.assertTrue(timezone in choices)",
"def test_common_time_zones_choices(self):\n # Obtain a timezone that is in pytz.all_timezones, but not in pytz.common_timezones\n timezones = set(pytz.all_timezones) - set(pytz.common_timezones)\n timezone = timezones.pop()\n\n choices = {\n choice[0]\n for choice in TimeZoneField.get_common_choices()\n }\n self.assertTrue(timezone not in choices)",
"def get_timezone():\n return dates.get_timezone(_get_tz())",
"def _get_tz():\n return 'UTC'",
"def _get_tzinfo(zonelabel):\n return moment.tzinfo(zonelabel) if zonelabel else _get_global_tz()",
"def local_timezone() -> Timezone | FixedTimezone:\n return get_local_timezone()",
"def _get_local_tz(module, timezone='UTC'):\n if platform.system() == 'Linux':\n timedatectl = get_bin_path('timedatectl')\n if timedatectl is not None:\n rcode, stdout, stderr = module.run_command(timedatectl)\n if rcode == 0 and stdout:\n line = _findstr(stdout, 'Time zone')\n full_tz = line.split(\":\", 1)[1].rstrip()\n timezone = full_tz.split()[0]\n return timezone\n else:\n module.warn('Incorrect timedatectl output. Timezone will be set to UTC')\n else:\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'SunOS':\n if os.path.exists('/etc/default/init'):\n for line in get_file_content('/etc/default/init', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/default/init. Assuming UTC')\n\n elif re.match('^Darwin', platform.platform()):\n systemsetup = get_bin_path('systemsetup')\n if systemsetup is not None:\n rcode, stdout, stderr = module.execute(systemsetup, '-gettimezone')\n if rcode == 0 and stdout:\n timezone = stdout.split(':', 1)[1].lstrip()\n else:\n module.warn('Could not run systemsetup. Assuming UTC')\n else:\n module.warn('Could not find systemsetup. Assuming UTC')\n\n elif re.match('^(Free|Net|Open)BSD', platform.platform()):\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'AIX':\n aix_oslevel = int(platform.version() + platform.release())\n if aix_oslevel >= 61:\n if os.path.exists('/etc/environment'):\n for line in get_file_content('/etc/environment', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/environment. Assuming UTC')\n else:\n module.warn('Cannot determine timezone when AIX os level < 61. Assuming UTC')\n\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n return timezone",
"def to_nztimezone(t):\n from dateutil import tz\n utctz = tz.gettz('UTC')\n nztz = tz.gettz('Pacific/Auckland')\n return [ti.replace(tzinfo=utctz).astimezone(nztz) for ti in pd.to_datetime(t)]",
"def timezone_offset_country():\r\n\r\n return _random.choice(\r\n [\r\n 'Eniwetoa',\r\n 'Hawaii',\r\n 'Alaska',\r\n 'Pacific',\r\n 'Mountain',\r\n 'Central',\r\n 'Eastern',\r\n 'Atlantic',\r\n 'Canada',\r\n 'Brazilia',\r\n 'Buenos Aries',\r\n 'Mid-Atlantic',\r\n 'Cape Verdes',\r\n 'Greenwich Mean Time',\r\n 'Dublin',\r\n 'Berlin',\r\n 'Rome',\r\n 'Israel',\r\n 'Cairo',\r\n 'Moscow',\r\n 'Kuwait',\r\n 'Abu Dhabi',\r\n 'Muscat',\r\n 'Islamabad',\r\n 'Karachi',\r\n 'Almaty',\r\n 'Dhaka',\r\n 'Bangkok, Jakarta',\r\n 'Hong Kong',\r\n 'Beijing',\r\n 'Tokyo',\r\n 'Osaka',\r\n 'Sydney',\r\n 'Melbourne',\r\n 'Guam',\r\n 'Magadan',\r\n 'Soloman Islands',\r\n 'Fiji',\r\n 'Wellington',\r\n 'Auckland',\r\n ]\r\n )",
"def timezone():\n\n return time.timezone",
"def get_timezone(time_zone=''):\n return pytz.timezone(time_zone)",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def get_timezone():\n localTimezone = request.args.get('timezone')\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n userId = request.args.get('login_as')\n localTimezone = users[int(userId)]['timezone']\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n return app.config['BABEL_DEFAULT_TIMEZONE']",
"def GetTimezones():\n return GetDataFromCsvFile('timezones.csv')",
"def timezone():\n \n pass",
"def getLocales(self):\n pass",
"def test_localeIndependent(self):\n # A point about three months in the past.\n then = self.now - (60 * 60 * 24 * 31 * 3)\n stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))\n\n # Fake that we're in a language where August is not Aug (e.g.: Spanish)\n currentLocale = locale.getlocale()\n locale.setlocale(locale.LC_ALL, \"es_AR.UTF8\")\n self.addCleanup(locale.setlocale, locale.LC_ALL, currentLocale)\n\n self.assertEqual(\n self._lsInTimezone('America/New_York', stat),\n '!--------- 0 0 0 0 Aug 28 17:33 foo')\n self.assertEqual(\n self._lsInTimezone('Pacific/Auckland', stat),\n '!--------- 0 0 0 0 Aug 29 09:33 foo')",
"def english_speaking(timezone, lang, text):\n return 1 if timezone in EN_TIMEZONES else 0",
"def tz_arg(timezone: str):\n for tz in all_timezones:\n if tz.lower().endswith(timezone.lower()):\n return tz\n return None",
"def get_time_zones(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetTimeZones.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def public_get_time_zones(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicGetTimeZones.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def __loadTimezones(self):\n try:\n fd = open(ZONEINFO_FILE)\n content = fd.readlines()\n fd.close()\n\n result = []\n\n for line in content:\n if line.startswith(\"#\"):\n continue\n\n parts = line.strip().split()\n\n if len(parts) < 3:\n continue\n\n result.append(parts[2])\n\n result.sort()\n\n return result\n except Exception as e:\n self.__logger.critical(\"Failed to load Timezones list\")\n raise ZKVMError(\"POSTINSTALL\", \"TIMEZONE\", \"TIMEZONE_LIST\")",
"def get_timezone():\n try:\n for line in open('/etc/sysconfig/clock'):\n field, value = line.split('=')\n if field.strip() == 'ZONE':\n return value.replace('\"', '').strip()\n return \"\"\n except IOError:\n return \"\""
]
| [
"0.72454035",
"0.6603862",
"0.65251845",
"0.6270924",
"0.5843416",
"0.58044297",
"0.57158774",
"0.5713681",
"0.5672513",
"0.5667701",
"0.5554293",
"0.55500525",
"0.5534162",
"0.5358738",
"0.5318892",
"0.53160745",
"0.5300022",
"0.52912754",
"0.52506125",
"0.5232472",
"0.5223967",
"0.5206145",
"0.52009976",
"0.51998895",
"0.5195669",
"0.5149296",
"0.5144455",
"0.5134456",
"0.51205754",
"0.5115859"
]
| 0.72785544 | 0 |
Function returning locale's territory. | def get_locale_territory(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
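# parse_langcode() already split the langcode, so "en_US.UTF-8" yields "US".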
return parts.get("territory", None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_territory_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.territories.get(self.territory or '')",
"def territories(self) -> localedata.LocaleDataDict:\n return self._data['territories']",
"def territory_name(self, language=DEFAULT_LANGUAGE, max_distance: int=25) -> str:\n return self._get_name('territory', language, max_distance)",
"def get_locale():\n return \"he\"",
"def get_locale(self):\n return self.locale",
"def get_locale(self):\n raise Unimplemented()",
"def locale(self):\n return self.__locale",
"def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def get_current_locale(self) -> str:\n return self.locale",
"def get_territory_locales(territory):\n\n return langtable.list_locales(territoryId=territory)",
"def get_localization(self):\n return self._request_data(\"/lokarria/localization\")",
"def get_locale_for_user(self):\n return 'en_US' # TODO(psimakov): choose proper locale from profile",
"def country_or_region(self) -> str:\n return pulumi.get(self, \"country_or_region\")",
"def get_locale(self):\n\n return to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\")",
"def territory(self):\n cache_key = (\n self.board.length, self.board.height, self.uid, self.index)\n if cache_key not in self.territory_cache:\n vector = self.compute_territory()\n self.territory_cache[cache_key] = vector\n else:\n vector = self.territory_cache[cache_key]\n return vector",
"def get_region(region):\n region = region.lower()\n if region in ['cena', 'ena', 'ceus', 'eus']:\n return 'cena'\n elif region in ['wna', 'wus']:\n return 'wna'\n else:\n raise NotImplementedError('No recognized region for: %s', region)",
"def getUS() -> int:\n pass",
"def country() -> str:",
"def province():\r\n return _random.choice(\r\n [\r\n [\"Ontario\", \"ON\"],\r\n [\"Quebec\", \"QC\"],\r\n [\"Nova Scotia\", \"NS\"],\r\n [\"New Brunswick\", \"NB\"],\r\n [\"Manitoba\", \"MB\"],\r\n [\"British Columbia\", \"BC\"],\r\n [\"Prince Edward Island\", \"PE\"],\r\n [\"Saskatchewan\", \"SK\"],\r\n [\"Alberta\", \"AB\"],\r\n [\"Newfoundland and Labrador\", \"NL\"]\r\n ]\r\n )",
"def get_region(state: str) -> str:\n state: str = state.title()\n region: str = \"\"\n states: dict = {\n \"NORTE\": [\n \"Tocantins\",\n \"Pará\",\n \"Amapá\",\n \"Roraima\",\n \"Amazonas\",\n \"Acre\",\n \"Rondônia\",\n ],\n \"NORDESTE\": [\n \"Alagoas\",\n \"Bahia\",\n \"Ceará\",\n \"Maranhão\",\n \"Paraíba\",\n \"Pernambuco\",\n \"Piauí\",\n \"Rio Grande Do Norte\",\n \"Sergipe\",\n ],\n \"CENTRO-OESTE\": [\n \"Goiás\",\n \"Mato Grosso\",\n \"Mato Grosso Do Sul\",\n \"Distrito Federal\",\n ],\n \"SUDESTE\": [\n \"São Paulo\",\n \"Rio de Janeiro\",\n \"Espírito Santo\",\n \"Minas Gerais\",\n ],\n \"SUL\": [\"Rio Grande Do Sul\", \"Paraná\", \"Santa Catarina\"],\n }\n\n for (key, values) in states.items():\n if state in values:\n region = key\n\n if not region:\n return \"DESCONHECIDO\"\n\n return region",
"def get_locale():\n return babel.Locale.parse(_get_locale())",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def get_continent(self):\n return self._tab.find(\"table\", class_=\"details\").find(\"td\", class_=\"value\").get_text()",
"def locale(self) -> \"Locale\":\n raise NotImplementedError",
"def get_top_regional_geolevel(self, rnode):\n return self.get_node('GeoLevels/GeoLevel', parent=rnode)",
"def get_language(self):\r\n return self.language",
"def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'",
"def get_language_code_coding_and_locale():\n # type: () -> Tuple[str, str, str]\n try:\n language_code, encoding = locale.getdefaultlocale()\n if language_code and encoding:\n used_locale = \".\".join([language_code, encoding])\n else:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale\"\n except Exception as e:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale: %s\" % (str(e))\n\n return language_code, encoding, used_locale",
"def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()"
]
| [
"0.6446209",
"0.6410002",
"0.610459",
"0.60990167",
"0.5954823",
"0.58422744",
"0.5822268",
"0.56995887",
"0.5688319",
"0.5630719",
"0.557179",
"0.55118454",
"0.54884374",
"0.54781526",
"0.5470992",
"0.5453731",
"0.54023576",
"0.5325863",
"0.53256893",
"0.5282508",
"0.5279689",
"0.5262301",
"0.52465564",
"0.52117574",
"0.52035475",
"0.51879346",
"0.516167",
"0.51314133",
"0.5127511",
"0.51197636"
]
| 0.6819847 | 0 |
Function returning preferred console fonts for the given locale. | def get_locale_console_fonts(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
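# Console fonts come back ranked, most suitable for the locale first.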
return langtable.list_consolefonts(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", "")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAvailableFonts():\n return list(AVAILABLE_FONTS)",
"def get_system_fonts():\n fonts = set()\n for x in font_manager.findSystemFonts():\n dot = x.rfind('.')\n slash = x.rfind(sep)\n x = x[slash + 1:dot]\n fonts.add(x)\n return sorted(fonts)",
"def available_text_fonts():\n bad = [u'acalc',\n u'acb',\n u'aco',\n u'acp']\n all = available_fonts()\n fonts = []\n for f in all:\n if (f == u'Series 60 ZDigi'):\n continue\n for b in bad:\n try:\n if (f.lower().startswith(b) and f[len(b)].isdigit()):\n break\n except IndexError:\n pass\n else:\n fonts.append(f)\n\n\n\n def compare(a, b):\n return -(a.lower() < b.lower())\n\n\n fonts.sort(compare)\n return fonts",
"def get_fonts():\r\n return pygame.font.get_fonts()",
"def get_fonts(self):\n\n font_path = self.execute_shell([\"figlet\", \"-I2\"])\n\n # get the font files installed in font_path,\n # and clean them up for printing\n fonts = [os.path.split(x)[1].split(\".\")[0] \\\n for x in self.execute_shell([\"find\",\n font_path, \"-iname\", \"*.flf\"]).split(\"\\n\")]\n\n return fonts",
"def get_font_dict(f):\n return tk_font.Font(font=f).actual()",
"def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]",
"def get_fonts():\n fonts = [f.name for f in matplotlib.font_manager.fontManager.ttflist]\n fonts.append([f.name for f in matplotlib.font_manager.fontManager.afmlist])\n\n fonts = sorted(list(set(fonts[:-1])))\n\n return fonts",
"def getFontPaths():\n global FONT_PATHS\n if not FONT_PATHS:\n\n if os.name == 'posix':\n # Try typical OSX font folders\n\n paths = ('/Library/Fonts', os.path.expanduser('~/Library/Fonts')) \n for path in paths:\n if os.path.exists(path):\n _recursivelyCollectFontPaths(path, FONT_PATHS)\n # Add other typical Linux font folders here to look at.\n elif os.name in ('nt', 'os2', 'ce', 'java', 'riscos'):\n # Add other typical Windows font folders here to look at.\n pass\n else:\n raise NotImplementedError('Unknown platform type \"%s\"' % os.name)\n # Add PageBot repository fonts, they always exist in this context.\n _recursivelyCollectFontPaths(getRootFontPath(), FONT_PATHS)\n\n return FONT_PATHS",
"def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)",
"def sans_serif():\n plt.rc(\"font\", family=\"serif\")",
"def GetFont(*args, **kwargs):\n return _gdi_.DC_GetFont(*args, **kwargs)",
"def Font_GetDefaultEncoding(*args):\n return _gdi_.Font_GetDefaultEncoding(*args)",
"def get_locale():\n return \"he\"",
"def font(self):\n return self[\"font\"]",
"def font(self):\n return self[\"font\"]",
"def font(self):\n return self[\"font\"]",
"def get_fonts():\n\n fontpath = \"/usr/share/fonts/truetype/freefont/\"\n font1 = \"FreeSansBold.ttf\"\n pfont = {}\n pfont['big'] = ImageFont.truetype(fontpath + font1, 120)\n pfont['medium'] = ImageFont.truetype(fontpath + font1, 70)\n pfont['small'] = ImageFont.truetype(fontpath + font1, 25)\n pfont['time'] = ImageFont.truetype(fontpath + font1, 160)\n \n return pfont",
"def getfonts(self):\n return self.vffile.getfonts()",
"def GetDefaultFont(self):\n return wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)",
"def GetNativeFontInfo(*args, **kwargs):\n return _gdi_.Font_GetNativeFontInfo(*args, **kwargs)",
"def getFontPaths(extraPaths=None):\n global FONT_PATHS\n if extraPaths is not None:\n FONT_PATHS = {} # Force (new) initialization\n\n if not FONT_PATHS:\n\n # If forced or initial call, collect the font paths on this\n # platform.\n if os.name == 'posix':\n paths = []\n\n # TODO: only for darwin platform.\n # Try typical OSX font folders:\n paths += ['/Library/Fonts', os.path.expanduser('~/Library/Fonts')]\n\n # Add other typical GNU+Linux font folders here to look at:\n paths += ['/usr/share/fonts']\n\n for path in paths:\n if os.path.exists(path):\n _recursivelyCollectFontPaths(path, FONT_PATHS)\n\n elif os.name in ('nt', 'os2', 'ce', 'java', 'riscos'):\n # Add other typical Windows font folders here to look at.\n pass\n else:\n raise NotImplementedError('Unknown platform type \"%s\"' % os.name)\n\n # Add the name name:fileName combinations from the predefined\n # dictionary (made with DrawBot.installedFonts())\n for fontName, fileName in FONT_NAME_2_FILE_NAME.items():\n if not fontName in FONT_PATHS:\n for path in paths:\n fontPath = path + '/' + fileName\n if os.path.exists(fontPath):\n FONT_PATHS[fontName] = fontPath\n break\n\n # Add PageBot repository fonts, they always exist in this context. But\n # they can be overwritten by fonts with the same (file) name in the\n # extraPaths.\n testFontsPath = getTestFontsPath()\n _recursivelyCollectFontPaths(testFontsPath, FONT_PATHS)\n\n if extraPaths is not None:\n if not isinstance(extraPaths, (list, tuple)):\n extraPaths = [extraPaths]\n for extraPath in extraPaths:\n _recursivelyCollectFontPaths(extraPath, FONT_PATHS)\n\n\n return FONT_PATHS",
"def get_font(self, option):\n return get_font(option=option)",
"def locale_supported_in_console(locale):\n\n locale_scripts = get_locale_scripts(locale)\n return set(locale_scripts).issubset(SCRIPTS_SUPPORTED_BY_CONSOLE)",
"def get_families() -> list:\n if not mainloop._initialized:\n raise RuntimeError(\"the mainloop needs to be initialized\")\n if not _family_cache:\n # The wrapper function can return anything iterable.\n _family_cache.add('Monospace')\n _family_cache.update(_get_wrapper('font:get_families')())\n # It's important to return a copy here because someone might\n # mutate the returned list.\n return sorted(_family_cache, key=str.casefold)",
"def GetDefaultEncoding(*args, **kwargs):\n return _gdi_.Font_GetDefaultEncoding(*args, **kwargs)",
"def GetNativeFontEncoding(*args, **kwargs):\n return _gdi_.GetNativeFontEncoding(*args, **kwargs)",
"def get_font_options(self): # real signature unknown; restored from __doc__\n pass",
"def setCommonFonts(windows=None):\n f = setFont('fontNormal', family=cfgFontName, size=cfgFontSize)\n aliasFont('fontButton', 'fontNormal')\n fb = setFont('fontBold', family=cfgFontName, size=cfgFontSize, weight='bold')\n fi = setFont('fontItalic', family=cfgFontName, size=cfgFontSize, slant='italic')\n setFont('fontLabel', family=cfgFontName, size=cfgFontSize+1, weight='bold')\n if windows:\n windows.fontBig = tkFont.Font(size=cfgFontSize+2, family=cfgFontName, weight='bold')\n windows.font = f\n windows.fontBold = fb\n windows.fontItalic = fi",
"def getpreferredencoding() -> str:\n return locale.getpreferredencoding() or \"UTF-8\""
]
| [
"0.6526404",
"0.61930466",
"0.61460704",
"0.6015974",
"0.59824073",
"0.5922822",
"0.58878744",
"0.5753985",
"0.56537426",
"0.5618229",
"0.5439051",
"0.5422323",
"0.54174143",
"0.5331235",
"0.53221977",
"0.53221977",
"0.53221977",
"0.52959025",
"0.5294225",
"0.5262638",
"0.5261653",
"0.52282387",
"0.52256954",
"0.51924527",
"0.51844877",
"0.5178031",
"0.5176452",
"0.51590663",
"0.5136787",
"0.5131527"
]
| 0.7950278 | 0 |
Function returning preferred scripts (writing systems) for the given locale. | def get_locale_scripts(locale):
parts = parse_langcode(locale)
if "language" not in parts:
raise InvalidLocaleSpec("'%s' is not a valid locale" % locale)
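# Scripts are ISO 15924 identifiers such as "Latn" or "Cyrl".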
return langtable.list_scripts(languageId=parts["language"],
territoryId=parts.get("territory", ""),
scriptId=parts.get("script", "")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_language_script(script):\n languages_scripts = {\n 'arab': ('ara', 'per'),\n 'cyrl': ('bel', 'chu', 'mac', 'rus', 'srp', 'ukr'),\n 'grek': ('grc', 'gre'),\n 'hani': ('chi', 'jpn'),\n 'hebr': ('heb', 'lad', 'yid'),\n 'jpan': ('jpn', ),\n 'kore': ('kor', ),\n 'zyyy': ('chi', )\n }\n if script in languages_scripts:\n languages = ([marc21.lang_from_008] +\n marc21.langs_from_041_a +\n marc21.langs_from_041_h)\n for lang in languages:\n if lang in languages_scripts[script]:\n return '-'.join([lang, script])\n error_print('WARNING LANGUAGE SCRIPTS:', marc21.bib_id,\n script, '008:', marc21.lang_from_008,\n '041$a:', marc21.langs_from_041_a,\n '041$h:', marc21.langs_from_041_h)\n return '-'.join(['und', script])",
"def get_locale():\n return \"he\"",
"def get_script_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.scripts.get(self.script or '')",
"def do_locale(args):\r\n # Global can't be defined at module level. Processes are wierd. pylint: disable=W0601\r\n global ARGS\r\n signal.signal(signal.SIGINT, signal.SIG_IGN) # Set the workers to ignore KeyboardInterrupts.\r\n # Unpack arguments\r\n lang, langs, stem, cstem, modfilter, brname, browser, ARGS = args\r\n parseargs()\r\n # A Hack. CN has a different structure, so use a different url form.\r\n if lang == 'cn':\r\n stem = cstem\r\n # Reset the driver between rounds\r\n restart_driver(browser)\r\n # Log into the site, so you can access the modules.\r\n try:\r\n log_in(lang)\r\n except Exception:\r\n DRIVER.quit()\r\n return '\"Login to {0} failed. That breaks the whole locale, look into it:\\n{1}\"'.format(\r\n lang, tidy_error().replace('\"', '\"\"'))\r\n\r\n # Start recording results.\r\n result = '_'.join([lang.upper(), brname.upper()])\r\n for mod in modfilter:\r\n try:\r\n # Figure out the locale coding.\r\n url = stem.format(langs[lang][0].replace('-', '_'), MODULES[mod][lang])\r\n DRIVER.get(url)\r\n begin_module()\r\n # Try to do the module\r\n for elem in SCRIPTS[mod]:\r\n domo(elem)\r\n result += ',\"{0}: PASS\"'.format(get_time())\r\n # Something goes wrong, document it and go to the next module.\r\n except Exception:\r\n result += ',\"{0}: FAIL: {1}\"'.format(get_time(), tidy_error().replace('\"', '\"\"'))\r\n draw_failure(lang, mod)\r\n DRIVER.quit()\r\n return result",
"def get_language_and_script(tracker):\n script = \"latin\"\n language = \"en\"\n for event in reversed(tracker.events):\n if event.get(\"event\") == \"user\":\n parse_data = event['parse_data']\n language = parse_data['language']['name']\n script = parse_data['script']\n break\n return language, script",
"def identifyLangage(script):\n\tlangage = \"undefined\"\n\tscriptNameInArray = script.split(\".\")\n\textension = scriptNameInArray[-1]\n\t\n\tif(extension == \"pl\"):\n\t\tlangage = \"perl\"\n\telif(extension == \"py\"):\n\t\tlangage = \"python\"\n\telif(extension == \"sh\"):\n\t\tlangage = \"bash\"\n\telse:\n\t\tlangage == \"not recognised\"\n\n\treturn langage",
"def bot_locales(self) -> pulumi.Output[Optional[Sequence['outputs.BotLocale']]]:\n return pulumi.get(self, \"bot_locales\")",
"def scripts(self) -> localedata.LocaleDataDict:\n return self._data['scripts']",
"def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())",
"def browserLanguages(request):\n fallback = []\n accepted = request.http_accept_language\n if accepted:\n # Extract the languages names from the string\n accepted = accepted.split(',')\n accepted = map(lambda x: x.split(';')[0], accepted)\n # Add base language for each sub language. If the user specified\n # a sub language like \"en-us\", we will try to to provide it or\n # a least the base language \"en\" in this case.\n for lang in accepted:\n lang = lang.lower()\n fallback.append(lang)\n if '-' in lang:\n baselang = lang.split('-')[0]\n fallback.append(baselang)\n return fallback",
"def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])",
"def locale_supported_in_console(locale):\n\n locale_scripts = get_locale_scripts(locale)\n return set(locale_scripts).issubset(SCRIPTS_SUPPORTED_BY_CONSOLE)",
"def getpreferredencoding() -> str:\n return locale.getpreferredencoding() or \"UTF-8\"",
"def languages():\n return \", \".join(sorted(\"{}: '{}'\".format(gTTS.LANGUAGES[k], k) for k in gTTS.LANGUAGES))",
"def get_exe_language(code_file):\n extension = code_file.split('.')[-1]\n if extension == 'py':\n return 'python'\n elif extension == 'm':\n return 'matlab'\n elif extension == 'sh':\n return 'bash'\n elif extension == 'rb':\n return 'ruby'\n else:\n print(\"Warning: file %s don't have any known extension \\\n(.py/.m/.sh/.rb)\" % code_file)\n return None",
"def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang",
"def programming_language(self) -> str:\n return self.random.choice(PROGRAMMING_LANGS)",
"def get_language_code_coding_and_locale():\n # type: () -> Tuple[str, str, str]\n try:\n language_code, encoding = locale.getdefaultlocale()\n if language_code and encoding:\n used_locale = \".\".join([language_code, encoding])\n else:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale\"\n except Exception as e:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale: %s\" % (str(e))\n\n return language_code, encoding, used_locale",
"def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _",
"def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp",
"def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\n for name in filter(None, varnames):\n locale = os.getenv(name)\n if locale:\n if name == 'LANGUAGE' and ':' in locale:\n # the LANGUAGE variable may contain a colon-separated list of\n # language codes; we just pick the language on the list\n locale = locale.split(':')[0]\n if locale.split('.')[0] in ('C', 'POSIX'):\n locale = 'en_US_POSIX'\n elif aliases and locale in aliases:\n locale = aliases[locale]\n try:\n return get_locale_identifier(parse_locale(locale))\n except ValueError:\n pass\n return None",
"def unparseLanguageSystems(tables):\n scripts = OrderedDict()\n for table in tables:\n if not table.table.ScriptList:\n continue\n for scriptRecord in table.table.ScriptList.ScriptRecord:\n scriptTag = scriptRecord.ScriptTag\n languages = scripts.get(scriptTag, [])\n script = scriptRecord.Script\n items = []\n if script.DefaultLangSys is not None:\n items.append((\"dflt\", script.DefaultLangSys))\n items += [(l.LangSysTag, l.LangSys) for l in script.LangSysRecord]\n languages = set([i[0] for i in items])\n\n if languages and not scriptTag in scripts:\n scripts[scriptTag] = languages\n\n return scripts",
"def bot_locales(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BotLocaleArgs']]]]:\n return pulumi.get(self, \"bot_locales\")",
"def scriptpath(self, code):\n return '' if code == 'en' else ('/' + code)",
"def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale",
"def charset():\n global _STATIC_VARS\n if 'charset' not in _STATIC_VARS:\n lang = getenv('LC_ALL')\n if not lang:\n lang = getenv('LANG')\n if lang:\n _STATIC_VARS.charset = \\\n lang.rsplit('.', 1)[-1].replace('-', '').lower()\n else:\n _STATIC_VARS.charset = ''\n return _STATIC_VARS.charset",
"def _get_available_languages(self):\n return stopwords.fileids()",
"def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval",
"def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'",
"def get_default_scripts ( self ):\n return roverlay.util.dictwalk.dictmerge (\n self.iter_default_scripts ( unpack=True ),\n get_value=lambda kv:kv[1]\n )"
]
| [
"0.6649128",
"0.5827049",
"0.57364774",
"0.5626669",
"0.55779046",
"0.53860724",
"0.5307141",
"0.53032994",
"0.5285117",
"0.5285105",
"0.5262619",
"0.52498895",
"0.5218934",
"0.5216045",
"0.52101284",
"0.52076274",
"0.5197666",
"0.5140851",
"0.5096763",
"0.5073295",
"0.5065786",
"0.50490785",
"0.49911094",
"0.49902838",
"0.49746886",
"0.49663076",
"0.49380055",
"0.49375892",
"0.49339426",
"0.4931999"
]
| 0.67950034 | 0 |
Write language configuration to the $root/etc/locale.conf file. | def write_language_configuration(lang, root):
try:
fpath = os.path.normpath(root + LOCALE_CONF_FILE_PATH)
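# Write the single LANG= assignment that systemd reads from /etc/locale.conf.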
with open(fpath, "w") as fobj:
fobj.write('LANG="%s"\n' % lang.lang)
except IOError as ioerr:
msg = "Cannot write language configuration file: %s" % ioerr.strerror
raise LocalizationConfigError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_config(lang):\n\n # ~~ create CONFIG ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n put_file_content('CONFIG', ['2' if lang == 'en' else '1', '6', ''])\n return True",
"def write_locales(config: Config) -> Config:\n strings_rendered = render_strings(reduce_strings(config.root))\n\n destination_files = []\n\n for key, contents in strings_rendered.items():\n destination_file = os.path.join(\n config.destination,\n \"res\",\n key,\n \"description\",\n \"{}.str\".format(config.name)\n )\n\n contents = \"\\n\".join([COMMENT_C + PREFIX, contents])\n\n assert_directories(destination_file, True)\n\n with open(destination_file, \"w\") as f:\n f.write(contents)\n\n destination_files.append(destination_file)\n\n return config",
"def store_as_django_locale(locale, content):\n\n filepath = \"%s%s\" % (DJANGO_I18N_OUTPUT_PATH, f\"%s/LC_MESSAGES/django.po\" % locale)\n\n # If the language does not exist yet, make the folder supporting this language.\n os.makedirs(Path(filepath).parent, exist_ok=True)\n\n with open(filepath, 'w') as f:\n f.write(content.decode('UTF-8'))",
"def write_translations_file(app, lang, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_pymodule_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_json_file(os.path.join(tpath, lang + \".json\"), app_messages)",
"def _set_mode(self, langs):\n if self.__mode == \"configparser\":\n config = configparser.ConfigParser()\n config.read(self.__lang_file)\n config[\"servers\"] = {}\n for lang in langs:\n config[lang] = {}\n with open(self.__lang_file, 'w') as configfile:\n config.write(configfile)\n elif self.__mode == \"json\":\n with open(self.__lang_file, 'w') as f:\n f.write(\"{}\")\n with open(self.__lang_file, 'r') as f:\n data = json.load(f)\n data[\"servers\"] = {}\n for lang in langs:\n data[lang] = {}\n with open(self.__lang_file, 'w') as f:\n json.dump(data, f, indent=2)",
"def _save_lang(self):\n for combobox, (option, _default) in list(self.comboboxes.items()):\n if option == 'interface_language':\n data = combobox.itemData(combobox.currentIndex())\n value = from_qvariant(data, to_text_string)\n break\n save_lang_conf(value)\n self.set_option('interface_language', value)",
"def save( self ):\n ini = codecs.open(self.filename,\"w\",\"utf-8\",errors=\"replace\",buffering=0)\n for (name,value) in self.conf.items():\n print >>ini, name, \"=\", value\n ini.close()",
"def create_yaml_languages():\n with open(join(dirname(__file__), 'languages.yaml'), 'w') as f:\n yaml.dump(list(iter_languages()), f)",
"def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)",
"def write_config(self):\n cfg = {\n 'channel':self.channel,\n 'seuil_min':self.seuil_min,\n 'last_level':self.last_level,\n 'last_level_date':self.last_level_date\n }\n write_conf(self.CONF_FILE,cfg)",
"def write_translations_file(app, lang, full_dict=None, app_messages=None):\n\tif not app_messages:\n\t\tapp_messages = get_messages_for_app(app)\n\n\tif not app_messages:\n\t\treturn\n\n\ttpath = frappe.get_app_path(app, \"translations\")\n\tfrappe.create_folder(tpath)\n\twrite_csv_file(\n\t\tos.path.join(tpath, lang + \".csv\"), app_messages, full_dict or get_all_translations(lang)\n\t)",
"def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass",
"def write_config(config, config_template, config_path):\n with open(config_path, 'wb') as cfg_file:\n cfg_file.write(\n config_template.render(**config).encode(\"utf-8\")\n )",
"def write(self, fp):\n if self._defaults:\n s = str(\"[%s]\\n\" % configparser.DEFAULTSECT)\n\n # This is python version dependent. Again :(\n if sys.version_info[0] == 2:\n fp.write(s)\n\n for (key, value) in self._defaults.items():\n self._write_item(fp, key, value)\n fp.write(\"\\n\")\n elif sys.version_info[0] == 3:\n fp.write(bytes(s, 'UTF-8'))\n\n for (key, value) in self._defaults.items():\n self._write_item(fp, key, value)\n fp.write(bytes(\"\\n\"), 'UTF-8')\n else:\n raise Exception(\"Unknown python version\")\n\n for section in self._sections:\n\n if sys.version_info[0] == 2:\n s = str(\"[%s]\\n\" % section)\n fp.write(s)\n\n for (key, value) in self._sections[section].items():\n self._write_item(fp, key, value)\n s = str(\"\\n\")\n fp.write(s)\n elif sys.version_info[0] == 3:\n s = str(\"[%s]\\n\" % section)\n fp.write(bytes(s, 'UTF-8'))\n for (key, value) in self._sections[section].items():\n self._write_item(fp, key, value)\n s = str(\"\\n\")\n fp.write(bytes(s, 'UTF-8'))\n else:\n raise Exception(\"Unknown python version\")",
"def add_translation_to_file(language, translation):\n with open(f'{TRANSLATIONS_DIRECTORY}/{language}', 'a') as f:\n f.write(translation + '\\n')",
"def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)",
"def set_i18n(lang, language=None):\n import gettext\n import locale\n import warnings\n import os\n\n try:\n locale.setlocale(locale.LC_ALL, lang)\n locale.setlocale(locale.LC_MESSAGES, language or lang)\n os.environ[\"LANG\"] = lang\n os.environ[\"LANGUAGE\"] = language or lang.split(\".\")[0]\n except locale.Error:\n warnings.warn(f\"locale is not supported: {lang}\")\n gettext.bindtextdomain(\"messages\", localedir=LOCALEDIR)",
"def create_dirsconf():\n data = textwrap.dedent('''\\\n [InstallDirs]\n docdir = /usr/share/doc/qastetray\n icondir = /usr/share/icons\n localedir = /usr/share/locale\n ''')\n with open('build/usr/lib/python3/dist-packages/'\n 'qastetray/dirs.conf', 'w') as f:\n f.write(data)",
"def do_lang(self, lang):\n\n self.lang = lang\n print(\"Set language to %s\" % lang)",
"def clean_configuration_directory():\r\n for locale in CONFIGURATION.translated_locales:\r\n clean_conf_folder(locale)",
"def configure(self, engine):\n # TODO: Set a units code as well\n code = engine.get_lang_code('basic', 'en')\n self['config']['StdReport']['BasicReport']['lang'] = code\n return True",
"def configure():\n\n configuration_file = '{}/templates/tmux.conf'.format(ROOT_FOLDER)\n destination_file = '.tmux.conf'\n\n print(green('Uploading configuration file...'))\n put(configuration_file, destination_file)",
"def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()",
"def includeme(config):\n\n import patches\n config.add_translation_dirs('kotti_multilingual:locale')\n config.scan(__name__)",
"def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()",
"def i18nupdate():\n click.echo('-> Updating i18n message files...')\n _extract_18n_messages()\n langs = app.config['BABEL_LANGUAGES']\n for lang in langs:\n _write_message_files(lang)\n click.echo('-> i18n message files updated.\\n')\n click.echo('You should now edit translations in following files:')\n for lang in langs:\n click.echo(os.path.join(I18N_PATH, lang, 'LC_MESSAGES', 'messages.po'))",
"def setup_locale(locale, lang=None, text_mode=False):\n\n if lang:\n lang.lang = locale\n\n # not all locales might be displayable in text mode\n if text_mode:\n # check if the script corresponding to the locale/language\n # can be displayed by the Linux console\n # * all scripts for the given locale/language need to be\n # supported by the linux console\n # * otherwise users might get a screen full of white rectangles\n # (also known as \"tofu\") in text mode\n # then we also need to check if we have information about what\n # font to use for correctly displaying the given language/locale\n\n script_supported = locale_supported_in_console(locale)\n log.debug(\"scripts found for locale %s: %s\", locale, get_locale_scripts(locale))\n\n console_fonts = get_locale_console_fonts(locale)\n log.debug(\"console fonts found for locale %s: %s\", locale, console_fonts)\n\n font_set = False\n if script_supported and console_fonts:\n # try to set console font\n for font in console_fonts:\n if set_console_font(font):\n # console font set successfully, skip the rest\n font_set = True\n break\n\n if not font_set:\n log.warning(\"can't set console font for locale %s\", locale)\n # report what exactly went wrong\n if not(script_supported):\n log.warning(\"script not supported by console for locale %s\", locale)\n if not(console_fonts): # no fonts known for locale\n log.warning(\"no console font found for locale %s\", locale)\n if script_supported and console_fonts:\n log.warning(\"none of the suggested fonts can be set for locale %s\", locale)\n log.warning(\"falling back to the English locale\")\n locale = constants.DEFAULT_LANG\n os.environ[\"LANG\"] = locale # pylint: disable=environment-modify\n\n # set the locale to the value we have selected\n log.debug(\"setting locale to: %s\", locale)\n setenv(\"LANG\", locale)\n locale_mod.setlocale(locale_mod.LC_ALL, locale)",
"def saveConfig():\n with open(_CONFIG_FNM, 'w') as configfile:\n CONFIG_DICT.write(configfile,\n space_around_delimiters=True)",
"def write(self, config_path=CONFIG_PATH):\n\n with open(self.full_path(config_path), 'w') as conf_fh:\n conf_fh.write(self.local_config)",
"def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)"
]
| [
"0.6383091",
"0.5974715",
"0.59279996",
"0.5890416",
"0.58505267",
"0.58486074",
"0.5719034",
"0.57134926",
"0.5584349",
"0.5558312",
"0.5506062",
"0.54640675",
"0.54373384",
"0.54269797",
"0.541539",
"0.54020566",
"0.5401911",
"0.53910375",
"0.5380744",
"0.53300685",
"0.5324231",
"0.53109294",
"0.5275312",
"0.5259869",
"0.5255301",
"0.52536124",
"0.52465206",
"0.5242958",
"0.5239827",
"0.5204316"
]
| 0.848899 | 0 |
Procedure that returns the firmware language information (if any). | def get_firmware_language(text_mode=False):
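# PlatformLang is exposed via efivarfs; the read below raises IOError on BIOS systems.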
try:
n = "/sys/firmware/efi/efivars/PlatformLang-8be4df61-93ca-11d2-aa0d-00e098032b8c"
d = open(n, 'r', 0).read()
except IOError:
return None
# the contents of the file are:
# 4 bytes of attribute data that we don't care about
# a NUL-terminated ASCII string like 'en-US'.
if len(d) < 10:
log.debug("PlatformLang was too short")
return None
d = d[4:]
if d[2] != '-':
log.debug("PlatformLang was malformed")
return None
# they use - and we use _, so fix it...
d = d[:2] + '_' + d[3:-1]
# UEFI 2.3.1 Errata C specifies 2 aliases in common use that
# aren't part of RFC 4646, but are allowed in PlatformLang.
# Because why make anything simple?
if d.startswith('zh_chs'):
d = 'zh_Hans'
elif d.startswith('zh_cht'):
d = 'zh_Hant'
d += '.UTF-8'
if not is_supported_locale(d):
log.debug("PlatformLang was '%s', which is unsupported.", d)
return None
locales = get_language_locales(d)
if not locales:
log.debug("No locales found for the PlatformLang '%s'.", d)
return None
log.debug("Using UEFI PlatformLang '%s' ('%s') as our language.", d, locales[0])
return locales[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_language_info(self):\n # If the following does not find _lang_list, then it assumes \n # there are no defined languages. If it finds _lang_list and \n # _lang_c, and _lang_c is listed in _lang_list then it assumes \n # everything is correct. It only does further checking if \n # _lang_list is there AND either _lang_c is missing or _lang_c \n # is not in _lang_list.\n \n chrdict = self._chrdict\n \n if \"_dta\" not in chrdict or \"_lang_list\" not in chrdict[\"_dta\"]:\n nlangs = 1\n curr_lang = \"default\"\n langs = [curr_lang,]\n else:\n dta_dict = chrdict[\"_dta\"]\n langs = dta_dict[\"_lang_list\"].split()\n nlangs = len(langs)\n has_lang_c = (\"_lang_c\" in dta_dict)\n curr_lang = dta_dict['_lang_c'] if has_lang_c else 'default'\n # Safety in case of malformed chrdict. \n # Also guards against empty lang list.\n if curr_lang not in langs or not has_lang_c:\n if IN_STATA:\n print(\"\".join(\n (\"{err}\",\n \"odd values in characteristics; \",\n \"trying to recover\")))\n else:\n print(\"odd values in characteristics; trying to recover\")\n \n # make sure curr_lang is not one of the stored languages\n \n # get stored languages\n stored_langs = set()\n for sub_dict in chrdict.values():\n for key in sub_dict.keys():\n if (key.startswith('_lang_l_') or \n key.startswith('_lang_v_')):\n stored_langs.add(key[8:])\n \n # if curr_lang in stored_langs, change curr_lang until it isn't\n count = 1\n while curr_lang in stored_langs:\n if curr_lang[:7] == 'default':\n count += 1\n curr_lang = 'default' + str(count)\n else:\n curr_lang = 'default'\n \n # make new langs and nlangs\n langs = list(stored_langs.union({curr_lang,}))\n nlangs = len(langs)\n \n return curr_lang, langs, nlangs",
"def describeWikiLanguage(ver, app):\r\n\r\n return ((\"wikidpad_mini_1_0\", u\"WikidPad Mini 1.0\", parserFactory,\r\n True, languageHelperFactory, True),)",
"def GetLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_GetLanguageInfo(*args, **kwargs)",
"def Locale_GetLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_GetLanguageInfo(*args, **kwargs)",
"def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)",
"def getData(language=None):",
"def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = _('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A 
entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language",
"def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang",
"def wikiLanguages():\n return languages",
"def detect_language(self):\n if not self.clean:\n self._text_clean()\n if not self.clean:\n return\n self.payload = \"q={}\".format(self.text)\n resp = requests.request('POST', self.url_language, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.language = json.loads(resp.text)['data']['detections'][0][0]['language']\n except KeyError:\n return",
"def language(self):\r\n return self._get('language', {})",
"def FindLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_FindLanguageInfo(*args, **kwargs)",
"def get_weather_language(self):\n return self.bot_data_file[\"weather\"][\"default_language\"]",
"def Locale_FindLanguageInfo(*args, **kwargs):\n return _gdi_.Locale_FindLanguageInfo(*args, **kwargs)",
"def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language",
"def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... or return 'en'\n else:\n lang = 'en'\n return lang",
"def get_language(self):\r\n return self.language",
"def get_language(self):\n return self.lang",
"def getTRSLanguage():\n try:\n return sys.argv[1]\n except IndexError as error:\n print(\"No language argument\\n\")\n sys.exit()",
"def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language",
"def get_user_language() -> str:\n languages = {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)",
"def detect_language(self, path=None, payload=None):\n\n f = file_path(path, payload)\n switches = [\"-l\", f]\n result = self._command_template(switches)\n return result, path, f",
"def extract_language_info(self, source: str) -> Dict[str, float]:\n languages = self.languages_compiled_exp.findall(source)\n language_info = {}\n for lang in languages:\n name = ' '.join(lang.split()[:-1])\n percent = float(lang.split()[-1]) # %\n language_info[name] = percent\n return language_info",
"def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None",
"def GetLanguages():\n return GetDataFromCsvFile('languages.csv')",
"def get_languages_info(repo):\n assert \"languages_url\" in repo, \"Languages URL is missing.\"\n response = requests.get(repo[\"languages_url\"])\n data = response.json()\n return data",
"def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang",
"def get_language_code_coding_and_locale():\n # type: () -> Tuple[str, str, str]\n try:\n language_code, encoding = locale.getdefaultlocale()\n if language_code and encoding:\n used_locale = \".\".join([language_code, encoding])\n else:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale\"\n except Exception as e:\n language_code = \"unknown\"\n encoding = \"unknown\"\n used_locale = \"unable to retrieve locale: %s\" % (str(e))\n\n return language_code, encoding, used_locale",
"def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"",
"def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)"
]
| [
"0.6677828",
"0.64402527",
"0.6295128",
"0.6291391",
"0.62625706",
"0.62259954",
"0.61591226",
"0.60692286",
"0.605621",
"0.5981005",
"0.5974207",
"0.5966141",
"0.5956271",
"0.59466666",
"0.5931928",
"0.59110105",
"0.5824171",
"0.58137363",
"0.581215",
"0.5801298",
"0.5789102",
"0.57793474",
"0.5752736",
"0.5730168",
"0.5727869",
"0.5721389",
"0.56998765",
"0.5696372",
"0.5649404",
"0.5644513"
]
| 0.71954906 | 0 |
Try to set console font to the given value. | def set_console_font(font):
log.debug("setting console font to %s", font)
rc = execWithRedirect("setfont", [font])
if rc == 0:
log.debug("console font set successfully to %s", font)
return True
else:
log.error("setting console font to %s failed", font)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setTTFont(font='default'):\n if font == 'default':\n font = 'Times New Roman' \n dislin.winfnt(font)",
"def set_font(self, font='A'):\n upper = font.upper()\n if upper == 'B':\n self._set_print_mode(self.FONT_MASK)\n elif upper == 'A':\n self._unset_print_mode(self.FONT_MASK)\n else:\n self._unset_print_mode(self.FONT_MASK)",
"def set_font(self, font: str):\n self.font = font",
"def setDislinFont(font='default'):\n fontdict[font]()",
"def set_font(self, font):\n\tself.m_font = font",
"def SetFont(*args):\n return _gdi_.GraphicsContext_SetFont(*args)",
"def setFont(self, font):\n self.edit.document().setDefaultFont(font)\n self.edit.setFont(font)\n super(BaseConsole, self).setFont(font)",
"def font(self, font='a'):\n if font not in self.__class__.__fontMap.keys():\n raise ValueError('font must be \\'a\\', \\'b\\', \\'c\\'')\n elif self._usePrintMode:\n self._textFont = font\n self._updatePrintMode()\n else:\n self._write(self.__class__.__ESC + 'M' + self.__class__.__fontMap[font])",
"def shell_font_changed(self, font):\n self.set_font(font)",
"def setHardwareFont():\n dislin.hwfont()",
"def SetFont(*args, **kwargs):\n return _gdi_.DC_SetFont(*args, **kwargs)",
"def setFont(fontKey, update=False, **opts):\n if not hasFont(fontKey) or update:\n globals()[fontKey] = tkFont.Font(**opts)\n \n return globals()[fontKey]",
"def set_font(self, font):\n\ttry:\n\t self.m_gdfont = self._fonts[font.lower()]\n\t self.m_font = font\n\texcept KeyError:\n\t raise ValueError, 'Illegal font name.'",
"def test_configs_font(\n self):\n root = Tk()\n custom = font.Font(root, family='Helvetica', size=12)\n self.assertEqual(custom.cget('family'), 'Helvetica')\n fontSelect.font_style(custom, 'Times')\n self.assertEqual(custom.cget('family'), 'Times')\n fontSelect.font_size(custom, 18)\n self.assertEqual(custom.cget('size'), 18)",
"def setPSFont(font='default'):\n if font == 'default':\n font = 'Times-Roman'\n dislin.psfont(font)",
"def set_font(self, font):\n this.font = font\n # Signal to the application that we need a resize\n this.chsize()",
"def SetFont(*args, **kwargs):\n return _gdi_.PseudoDC_SetFont(*args, **kwargs)",
"def set_font_family(self, font):\n self.parent.setCurrentFont(font)",
"def setFont(font='default',hardware=1):\n if font == 'default' and hardware:\n setHardwareFont()\n return\n currfmt = getFileFormat()\n if isPostscript(currfmt):\n setPSFont(font)\n elif isWMF(currfmt):\n setTTFont(font)\n else:\n setDislinFont(font)",
"def set_font_size(*args):\n size = font_size.get()\n message_inp.configure(font=f'TKDefault {size}')",
"def setfont(font=font_default, unicode=True):\n # | - setfont\n # Use TeX for all figure text!\n plt.rc('text', usetex=True)\n\n font = font.lower().replace(\" \", \"\")\n if font == 'times':\n # Times\n font = {'family': 'serif', 'serif': ['Times']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage{mathptmx}\n \"\"\"\n elif font == 'helvetica':\n # Helvetica\n # set serif, too. Otherwise setting to times and then\n # Helvetica causes an error.\n font = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],\n 'serif': ['cm10']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \\usepackage{helvet}\n \\usepackage{sansmath}\n \"\"\"\n else:\n # Computer modern serif\n font = {'family': 'serif', 'serif': ['cm10']}\n # preamble = r\"\"\"\n preamble = r\"\"\"\n \\usepackage{color}\n \"\"\"\n\n if font == 'cmss':\n # Computer modern sans serif\n font = {'family': 'sans-serif', 'serif': ['cmss']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \"\"\"\n\n if unicode:\n # Unicode for Tex\n #preamble = r\"\"\"\\usepackage[utf8]{inputenc}\"\"\" + preamble\n # inputenc should be set automatically\n plt.rcParams['text.latex.unicode'] = True\n\n # print font, preamble\n plt.rc('font', **font)\n plt.rcParams['text.latex.preamble'] = preamble\n #__|",
"def setfont(font=font_default, unicode=True):\n # | - setfont\n # Use TeX for all figure text!\n plt.rc('text', usetex=True)\n\n font = font.lower().replace(\" \", \"\")\n if font == 'times':\n # Times\n font = {'family': 'serif', 'serif': ['Times']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage{mathptmx}\n \"\"\"\n elif font == 'helvetica':\n # Helvetica\n # set serif, too. Otherwise setting to times and then\n # Helvetica causes an error.\n font = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],\n 'serif': ['cm10']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \\usepackage{helvet}\n \\usepackage{sansmath}\n \"\"\"\n else:\n # Computer modern serif\n font = {'family': 'serif', 'serif': ['cm10']}\n # preamble = r\"\"\"\n preamble = r\"\"\"\n \\usepackage{color}\n \"\"\"\n\n if font == 'cmss':\n # Computer modern sans serif\n font = {'family': 'sans-serif', 'serif': ['cmss']}\n preamble = r\"\"\"\n \\usepackage{color}\n \\usepackage[tx]{sfmath}\n \"\"\"\n\n if unicode:\n # Unicode for Tex\n #preamble = r\"\"\"\\usepackage[utf8]{inputenc}\"\"\" + preamble\n # inputenc should be set automatically\n plt.rcParams['text.latex.unicode'] = True\n\n # print font, preamble\n plt.rc('font', **font)\n plt.rcParams['text.latex.preamble'] = preamble\n #__|",
"def SetFont(self, font):\r\n \r\n self._font = font",
"def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]",
"def set_font(self, font, option):\n # Update fonts in all plugins\n set_font(font, option=option)\n plugins = self.main.widgetlist + self.main.thirdparty_plugins\n for plugin in plugins:\n plugin.update_font()",
"def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value",
"def SetFont(self, font):\r\n\r\n self._font = font",
"def fl_set_font(fontnum, size):\n _fl_set_font = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_font\",\\\n None, [cty.c_int, cty.c_int],\\\n \"\"\"void fl_set_font(int numb, int size)\"\"\")\n library.check_if_flinitialized()\n i_fontnum = library.convert_to_intc(fontnum)\n i_size = library.convert_to_intc(size)\n library.keep_elem_refs(fontnum, i_fontnum, size, i_size)\n _fl_set_font(i_fontnum, i_size)",
"def comdlg32_ChooseFont(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpcf\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_font(self, option):\n return get_font(option=option)"
]
| [
"0.70265186",
"0.70135546",
"0.69861555",
"0.6951681",
"0.6923162",
"0.6910645",
"0.68927073",
"0.6834571",
"0.6778966",
"0.67207116",
"0.67187274",
"0.66070294",
"0.66006804",
"0.6571807",
"0.6571661",
"0.6523891",
"0.6514027",
"0.64832944",
"0.647367",
"0.64600194",
"0.6459305",
"0.6459305",
"0.6449787",
"0.6426006",
"0.64081997",
"0.6368928",
"0.6364681",
"0.63539004",
"0.6293877",
"0.6273333"
]
| 0.75561583 | 0 |
Clean and configure the local environment variables. This function will attempt to determine the desired locale and configure the process environment (os.environ) in the least surprising way. If a locale argument is provided, it will be attempted first. After that, this function will attempt to use the language environment variables in a manner similar to gettext(3) (in order, $LANGUAGE, $LC_ALL, $LC_MESSAGES, $LANG), followed by the UEFI PlatformLang, followed by a default. When this function returns, $LANG will be set, and $LANGUAGE, $LC_ALL, and $LC_MESSAGES will not be set, because they get in the way when changing the language after startup. This function must be run before any threads are started. This function modifies the process environment, which is not threadsafe. | def setup_locale_environment(locale=None, text_mode=False, prefer_environment=False):
# pylint: disable=environment-modify
# Look for a locale in the environment. If the variable is setup but
# empty it doesn't count, and some programs (KDE) actually do this.
# If prefer_environment is set, the environment locale can override
# the parameter passed in. This can be used, for example, by initial-setup,
# to prefer the possibly-more-recent environment settings before falling back
# to a locale set at install time and saved in the kickstart.
if not locale or prefer_environment:
for varname in ("LANGUAGE", "LC_ALL", "LC_MESSAGES", "LANG"):
if varname in os.environ and os.environ[varname]:
locale = os.environ[varname]
break
# Look for a locale in the firmware if there was nothing in the environment
if not locale:
locale = get_firmware_language(text_mode)
# parse the locale using langtable
if locale:
env_langs = get_language_locales(locale)
if env_langs:
            # the first language is the best match
locale = env_langs[0]
else:
log.error("Invalid locale '%s' given on command line, kickstart or environment", locale)
locale = None
# If langtable returned no locales, or if nothing was configured, fall back to the default
if not locale:
locale = constants.DEFAULT_LANG
# Save the locale in the environment
os.environ["LANG"] = locale
# Cleanup the rest of the environment variables
for varname in ("LANGUAGE", "LC_ALL", "LC_MESSAGES"):
if varname in os.environ:
del os.environ[varname] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _initializeLocale():\n \n if sys.platform == constants.WIN32:\n locale.setlocale(locale.LC_ALL, \"\")\n else:\n if constants.LC_ALL in os.environ:\n try:\n locale.setlocale(locale.LC_ALL, os.environ[constants.LC_ALL])\n return\n except locale.Error:\n # First try did not work, encoding must be set first then set locale.\n pass\n languageCode, encoding = locale.getdefaultlocale()\n if languageCode is None:\n languageCode = \"en_US\"\n # Set the encoding of the Python environment if no encoding is set.\n if encoding is None:\n encoding = constants.UTF8\n if encoding.lower() == \"utf\":\n encoding = constants.UTF8\n try:\n locale.setlocale(locale.LC_ALL, \"%s.%s\" % (languageCode, encoding))\n except locale.Error:\n try:\n locale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n except locale.Error:\n locale.setlocale(locale.LC_ALL, \"C\")",
"def setup_locale(locale, lang=None, text_mode=False):\n\n if lang:\n lang.lang = locale\n\n # not all locales might be displayable in text mode\n if text_mode:\n # check if the script corresponding to the locale/language\n # can be displayed by the Linux console\n # * all scripts for the given locale/language need to be\n # supported by the linux console\n # * otherwise users might get a screen full of white rectangles\n # (also known as \"tofu\") in text mode\n # then we also need to check if we have information about what\n # font to use for correctly displaying the given language/locale\n\n script_supported = locale_supported_in_console(locale)\n log.debug(\"scripts found for locale %s: %s\", locale, get_locale_scripts(locale))\n\n console_fonts = get_locale_console_fonts(locale)\n log.debug(\"console fonts found for locale %s: %s\", locale, console_fonts)\n\n font_set = False\n if script_supported and console_fonts:\n # try to set console font\n for font in console_fonts:\n if set_console_font(font):\n # console font set successfully, skip the rest\n font_set = True\n break\n\n if not font_set:\n log.warning(\"can't set console font for locale %s\", locale)\n # report what exactly went wrong\n if not(script_supported):\n log.warning(\"script not supported by console for locale %s\", locale)\n if not(console_fonts): # no fonts known for locale\n log.warning(\"no console font found for locale %s\", locale)\n if script_supported and console_fonts:\n log.warning(\"none of the suggested fonts can be set for locale %s\", locale)\n log.warning(\"falling back to the English locale\")\n locale = constants.DEFAULT_LANG\n os.environ[\"LANG\"] = locale # pylint: disable=environment-modify\n\n # set the locale to the value we have selected\n log.debug(\"setting locale to: %s\", locale)\n setenv(\"LANG\", locale)\n locale_mod.setlocale(locale_mod.LC_ALL, locale)",
"def get_english_env(env):\n if sys.platform == 'win32':\n return None\n env = env or os.environ\n\n # Test if it is necessary at all.\n is_english = lambda name: env.get(name, 'en').startswith('en')\n\n if is_english('LANG') and is_english('LANGUAGE'):\n return None\n\n # Requires modifications.\n env = env.copy()\n def fix_lang(name):\n if not is_english(name):\n env[name] = 'en_US.UTF-8'\n fix_lang('LANG')\n fix_lang('LANGUAGE')\n return env",
"def InitLocale(self):\n self.ResetLocale()\n if 'wxMSW' in wx.PlatformInfo:\n import locale\n try:\n lang, enc = locale.getdefaultlocale()\n self._initial_locale = wx.Locale(lang, lang[:2], lang)\n # locale.setlocale(locale.LC_ALL, lang)\n # locale.setlocale(locale.LC_ALL, 'C')\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale: lang = {lang}\\n')\n print(lang)\n except (ValueError, locale.Error) as ex:\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-0 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)\n try:\n locale.setlocale(locale.LC_ALL, lang.replace('_', '-'))\n except (ValueError, locale.Error) as ex:\n locale.setlocale(locale.LC_ALL, lang.replace('-', '_'))\n target = wx.LogStderr()\n orig = wx.Log.SetActiveTarget(target)\n with open('./launch.log', 'a') as fp:\n fp.write(f'wxApp_LocaleFix.InitLocale:except-1 Unable to set default locale: \\'{ex}\\'\\n')\n print(\"Unable to set default locale: '{}'\".format(ex))\n wx.LogError(\"Unable to set default locale: '{}'\".format(ex))\n wx.Log.SetActiveTarget(orig)",
"def make_utf8_env():\n global _CACHED_ENV\n if not _CACHED_ENV:\n # LANG are in the form of <language>[.<encoding>[@<modifier>]]\n # We want to replace the \"encoding\" part with UTF-8\n lang_re = re.compile('\\.([^@]*)')\n\n env = os.environ.copy()\n lang = env.get('LANG', DEFAULT_LANG)\n if lang_re.search(lang):\n lang = lang_re.sub('.UTF-8', lang)\n else:\n lang = DEFAULT_LANG\n\n env['LANG'] = lang\n _CACHED_ENV = env\n return _CACHED_ENV",
"def _finalize_env(self, env: Dict[str, str]) -> None:\n\n # add the applicable kernel_id and language to the env dict\n env['KERNEL_ID'] = self.kernel_id\n\n kernel_language = 'unknown-kernel-language'\n if len(self.kernel_spec.language) > 0:\n kernel_language = self.kernel_spec.language.lower()\n # if already set in env: stanza, let that override.\n env['KERNEL_LANGUAGE'] = env.get('KERNEL_LANGUAGE', kernel_language)\n\n # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS)\n for k in env_pop_list:\n env.pop(k, None)",
"def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\n for name in filter(None, varnames):\n locale = os.getenv(name)\n if locale:\n if name == 'LANGUAGE' and ':' in locale:\n # the LANGUAGE variable may contain a colon-separated list of\n # language codes; we just pick the language on the list\n locale = locale.split(':')[0]\n if locale.split('.')[0] in ('C', 'POSIX'):\n locale = 'en_US_POSIX'\n elif aliases and locale in aliases:\n locale = aliases[locale]\n try:\n return get_locale_identifier(parse_locale(locale))\n except ValueError:\n pass\n return None",
"def setPortalLocale( self ):\n info = getLanguageInfo( self )\n\n # find default and effective locale settings\n def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )\n cur_locale = getlocale()\n cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''\n\n # check whether locale is already ok\n if def_locale is None or cur_locale.lower() == def_locale.lower():\n return\n\n # change effective locale\n try:\n setlocale( LC_ALL, def_locale )\n except Exceptions.LocaleError:\n pass",
"def set_i18n(lang, language=None):\n import gettext\n import locale\n import warnings\n import os\n\n try:\n locale.setlocale(locale.LC_ALL, lang)\n locale.setlocale(locale.LC_MESSAGES, language or lang)\n os.environ[\"LANG\"] = lang\n os.environ[\"LANGUAGE\"] = language or lang.split(\".\")[0]\n except locale.Error:\n warnings.warn(f\"locale is not supported: {lang}\")\n gettext.bindtextdomain(\"messages\", localedir=LOCALEDIR)",
"def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext",
"def __initializeLocale(self):\n langdomain = 'tortugaStrings'\n\n # Locate the Internationalization stuff\n localedir = '../share/locale' \\\n if os.path.exists('../share/locale') else \\\n os.path.join(self._cm.getRoot(), 'share/locale')\n\n gettext.install(langdomain, localedir)",
"def set_default_language(language_code):\n thread_locals.DEFAULT_LANGUAGE = language_code",
"def clean_translated_locales():\r\n for locale in CONFIGURATION.translated_locales:\r\n clean_locale(locale)",
"def set_locale_de():\n try:\n if platform.system() == \"Windows\":\n locale.setlocale(locale.LC_ALL, \"German\")\n else:\n locale.setlocale(locale.LC_ALL, \"de_DE.utf8\")\n except locale.Error:\n pass",
"def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)",
"def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"def _init_env_variables(self):\n raise NotImplementedError()",
"async def setup_env(self, *args, **kwargs):\n os.environ[\"SLUGIFY_USES_TEXT_UNIDECODE\"] = \"yes\"",
"def check_environ():\n global _environ_checked\n if _environ_checked:\n return\n\n if os.name == 'posix' and 'HOME' not in os.environ:\n import pwd\n os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]\n\n if 'PLAT' not in os.environ:\n os.environ['PLAT'] = _sysconfig.get_platform()\n\n _environ_checked = 1",
"def load_evironment():\n environment = Utility.load_yaml(os.getenv(\"system_file\", \"./system.yaml\"))\n for key in environment:\n if key in os.environ:\n environment[key] = os.getenv(key)\n Utility.environment = environment",
"def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language",
"def getDefaultLocaleLanguage():\n # Setup textdomain\n try:\n locale.bindtextdomain(TEXT_DOMAIN, DEFAULT_LOCALE_PATH)\n except AttributeError:\n log_func.warning(u'Locale module not support text domain')\n\n language = locale.getlocale()[0]\n\n if sys_func.isWindowsPlatform():\n if language in WINDOWS2UNIX_LANGUAGE:\n language = WINDOWS2UNIX_LANGUAGE.get(language, DEFAULT_LOCALE)\n else:\n try:\n item1, item2 = language.split('_')\n language = '_'.join((item1[:2].lower(), item2[:2].upper()))\n except:\n log_func.fatal(u'Error get language')\n language = DEFAULT_LOCALE\n return language",
"def LocalEnv(local_env):\n old_env = os.environ.copy()\n os.environ.update(local_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)",
"def generic_env_configure_vars(self, verbose=False):\n\n if self.settings.os == \"Windows\":\n self.output.fatal(\"Cannot build on Windows, sorry!\")\n return\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n libs = 'LIBS=\"%s\"' % \" \".join([\"-l%s\" % lib for lib in self.deps_cpp_info.libs])\n ldflags = 'LDFLAGS=\"%s\"' % \" \".join([\"-L%s\" % lib for lib in self.deps_cpp_info.lib_paths]) \n archflag = \"-m32\" if self.settings.arch == \"x86\" else \"\"\n cflags = 'CFLAGS=\"-fPIC %s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cflags))\n cpp_flags = 'CPPFLAGS=\"%s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cppflags))\n command = \"env %s %s %s %s\" % (libs, ldflags, cflags, cpp_flags)\n # elif self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n # cl_args = \" \".join(['/I\"%s\"' % lib for lib in self.deps_cpp_info.include_paths])\n # lib_paths= \";\".join(['\"%s\"' % lib for lib in self.deps_cpp_info.lib_paths])\n # command = \"SET LIB=%s;%%LIB%% && SET CL=%s\" % (lib_paths, cl_args)\n # if verbose:\n # command += \" && SET LINK=/VERBOSE\"\n \n return command",
"def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = 
self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n 
self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # 
Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0"
]
| [
"0.68579626",
"0.65727067",
"0.65229607",
"0.61467046",
"0.61113185",
"0.5992732",
"0.5751856",
"0.5712392",
"0.5593329",
"0.5477767",
"0.5438577",
"0.5424868",
"0.5334427",
"0.53033906",
"0.53014386",
"0.5283837",
"0.5225252",
"0.5225252",
"0.5225252",
"0.5225252",
"0.5225252",
"0.5225252",
"0.5204258",
"0.5149182",
"0.5121257",
"0.5100356",
"0.5084576",
"0.5078548",
"0.5054988",
"0.50271744"
]
| 0.74926955 | 0 |
Tests to see if both locations are the same, i.e. the rank and file are the same. | def __eq__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Cannot compare other types with Location")
return int(self.rank) == int(other.rank) and \
int(self.file) == int(other.file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_same_entry(entry_1, entry_2):\r\n if entry_1 == entry_2:\r\n return True\r\n if os.path.realpath(entry_1) == os.path.realpath(entry_2):\r\n return True\r\n if (os.path.basename(entry_1) == os.path.basename(entry_2) and\r\n (os.path.basename(os.path.dirname(entry_1)) ==\r\n os.path.basename(os.path.dirname(entry_2))) and\r\n os.path.basename(os.path.dirname(entry_1)).startswith('tmp')):\r\n return True\r\n return False",
"def samefile(self, other):\n other = os.fspath(other)\n if not isabs(other):\n other = abspath(other)\n if self == other:\n return True\n if not hasattr(os.path, \"samefile\"):\n return False\n return error.checked_call(os.path.samefile, self.strpath, other)",
"def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result",
"def test_identical(self):\n write this test!",
"def samefile(path1, path2):\n try:\n return os.path.samefile(path1, path2)\n except OSError as err:\n if err.errno == 2: # ENOENT\n return False\n else:\n raise",
"def same_file(wavecar1, wavecar2, wavecar3):\n same = False\n if (filecmp.cmp(wavecar1, wavecar2, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar2))\n same = True\n if (filecmp.cmp(wavecar1, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar1, wavecar3))\n same = True\n if (filecmp.cmp(wavecar2, wavecar3, shallow=False)):\n print(\"Serious problem:: {} and {} are the same\".format(wavecar2, wavecar3))\n same = True\n\n if same:\n print(\"It seems that you are using same files to do finite difference, exit\")\n print(\"\\tComment the 'same_file' checker if you know what you are doing\")\n raise SystemExit",
"def mapsMatch(m1,m2):\n same = True\n f1 = file(m1,'r').readlines()\n f2 = file(m2,'r').readlines()\n for i, row in enumerate(f1):\n row = row.strip().split()\n row2 = f2[i].strip().split()\n if row[0] <> row2[0]:\n\t same = False\n break\n return same",
"def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)",
"def test_image_location():\n from napari.layers.image.experimental._image_location import ImageLocation\n\n layer1 = _create_layer()\n layer2 = _create_layer()\n\n locations1_0 = (\n ImageLocation(layer1, (0, 0)),\n ImageLocation(layer1, (0, 0)),\n )\n\n locations1_1 = (\n ImageLocation(layer1, (0, 1)),\n ImageLocation(layer1, (0, 1)),\n )\n\n locations2_0 = (\n ImageLocation(layer2, (0, 0)),\n ImageLocation(layer2, (0, 0)),\n )\n\n locations2_1 = (\n ImageLocation(layer2, (0, 1)),\n ImageLocation(layer2, (0, 1)),\n )\n\n # All identical pairs should be the same.\n assert locations1_0[0] == locations1_0[1]\n assert locations1_1[0] == locations1_1[1]\n assert locations2_0[0] == locations2_0[1]\n assert locations2_1[0] == locations2_1[1]\n\n # Nothing else should be the same\n for i in range(0, 2):\n assert locations1_0[i] != locations1_1[i]\n assert locations1_0[i] != locations2_0[i]\n assert locations1_0[i] != locations2_1[i]",
"def compare_contents(lhs, rhs):\n for filename in (lhs, rhs):\n if not os.path.exists(filename):\n return False\n\n with open(lhs, \"r\") as lhs_file, open(rhs, \"r\") as rhs_file:\n return lhs_file.read() == rhs_file.read()",
"def on_same_mount(cls, path1: os.PathLike, path2: os.PathLike) -> bool:\n return cls.get_mount(path1)[0] == cls.get_mount(path2)[0]",
"def _compare_file(path1, path2):\n\n try:\n return _open_file(path1) == _open_file(path2)\n except OSError:\n return False",
"def same(self, x, y):\n return self.find(x) == self.find(y)",
"def areSamePaths(path1, path2):\n\n path1 = os.path.abspath(os.path.normpath(path1))\n path2 = os.path.abspath(os.path.normpath(path2))\n\n if os.path.exists(path1) and os.path.exists(path2):\n path1 = getExternalUsePath(path1)\n path2 = getExternalUsePath(path2)\n\n path1 = os.path.normcase(path1)\n path2 = os.path.normcase(path2)\n\n return path1 == path2",
"def SrcDstSame(self, src_uri, dst_uri):\n if src_uri.is_file_uri() and dst_uri.is_file_uri():\n # Translate a/b/./c to a/b/c, so src=dst comparison below works.\n new_src_path = re.sub('%s+\\.%s+' % (os.sep, os.sep), os.sep,\n src_uri.object_name)\n new_src_path = re.sub('^.%s+' % os.sep, '', new_src_path)\n new_dst_path = re.sub('%s+\\.%s+' % (os.sep, os.sep), os.sep,\n dst_uri.object_name)\n new_dst_path = re.sub('^.%s+' % os.sep, '', new_dst_path)\n return (src_uri.clone_replace_name(new_src_path).uri ==\n dst_uri.clone_replace_name(new_dst_path).uri)\n else:\n return src_uri.uri == dst_uri.uri",
"def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False",
"def samefile(path1, path2):\n # Handles path-like objects and checks if storage\n path1, path1_is_storage = format_and_is_storage(path1)\n path2, path2_is_storage = format_and_is_storage(path2)\n\n # Local files: Redirects to \"os.path.samefile\"\n if not path1_is_storage and not path2_is_storage:\n return os_path_samefile(path1, path2)\n\n # One path is local, the other storage\n if not path1_is_storage or not path2_is_storage:\n return False\n\n with handle_os_exceptions():\n # Paths don't use same storage\n system = get_instance(path1)\n if system is not get_instance(path2):\n return False\n\n # Relative path are different\n elif system.relpath(path1) != system.relpath(path2):\n return False\n\n # Same files\n return True",
"def equal(self, file1, file2):\n\n if file1.size != file2.size:\n return False\n\n # Compare stat\n if self.use_stat and not self._equal_stat(file1, file2):\n return False\n\n # Compare times\n if self.use_times and not self._equal_times(file1, file2):\n return False\n\n # Compare attributes\n if self.use_attributes and not self._equal_attributes(file1, file2):\n return False\n\n # TODO: Optionally diff hashes\n\n return True",
"def isSameName(self, other):\n if not isinstance(other, self.__class__):\n return 0\n if self.data.has_key('name') and \\\n other.data.has_key('name') and \\\n build_name(self.data, canonical=0) == \\\n build_name(other.data, canonical=0):\n return 1\n if self.accessSystem == other.accessSystem and \\\n self.characterID is not None and \\\n self.characterID == other.characterID:\n return 1\n return 0",
"def test_check_mapping_file_correct_file(self):\r\n\r\n # Use valid data, default parameters\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)\r\n\r\n # With additional parameters added should not change results using\r\n # same valid input data\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=True,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)",
"def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path",
"def proj_is_same(p1, p2):\n if has_gdal:\n # this is more robust, but gdal is a pain\n s1 = osr.SpatialReference()\n s1.ImportFromProj4(p1.srs)\n s2 = osr.SpatialReference()\n s2.ImportFromProj4(p2.srs)\n return s1.IsSame(s2) == 1 # IsSame returns 1 or 0\n else:\n # at least we can try to sort it\n p1 = '+'.join(sorted(p1.srs.split('+')))\n p2 = '+'.join(sorted(p2.srs.split('+')))\n return p1 == p2",
"def _io_similar(lhs, rhs):\n ldecl = lhs.decl()\n rdecl = rhs.decl()\n if not ldecl[::2] == rdecl[::2]: # names are the same\n return False\n size = len(ldecl)\n return all(ldecl[i] is rdecl[i] for i in range(1, size, 2))",
"def __eq__(self, other) -> bool:\r\n if isinstance(other, Square):\r\n if (self.board, self.file, self.rank) == (\r\n other.board, other.file, other.rank):\r\n return True\r\n \r\n return False",
"def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ",
"def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None",
"def names_are_equal(filesystem_name, fixture_name):\n if filesystem_safe(filesystem_name) == fixture_name:\n return True\n return False",
"def same_rank(self, other: 'Piece') -> bool:\n\n return self.rank == other.rank",
"def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass",
"def test_coords_same_direction(self): # test_change_coords = method\n mi = (0,1,1.5708)\n mj = (0,2,1.5708)\n result = new_mj_coords(mi, mj)\n self.assertEqual(result, (0.3317021649341794, 0.9433841602327115, 0.0))\n\n '''\n the method .assertEqual(a,b) is equivalent to a == b\n other methods include: .assertIs(a,b) = a is b, .assertIsNone(x) = x is None,\n .assertIn(a,b) = a in b, and .assertIsInstance(a,b) = isinstance(a, b)\n\n\n '''"
]
| [
"0.6606223",
"0.64625067",
"0.6404751",
"0.6316974",
"0.62666816",
"0.62198895",
"0.6162294",
"0.60664684",
"0.603733",
"0.6031126",
"0.6018793",
"0.5989421",
"0.59823585",
"0.5964671",
"0.59412783",
"0.5938612",
"0.58751655",
"0.5868189",
"0.5863637",
"0.58289623",
"0.580593",
"0.58001876",
"0.5799874",
"0.5784879",
"0.5761047",
"0.57503384",
"0.5749849",
"0.5743473",
"0.5742572",
"0.57244456"
]
| 0.6970993 | 0 |
Returns whether the move is on the board. If the rank and file are both between 0 and 7 inclusive, this method returns True. | def on_board(self):
if -1 < self._rank < 8 and \
-1 < self._file < 8:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_board(self,pos : np.ndarray) -> bool:\r\n if 0 > pos[0] or pos[0] >= BOARD_SIZE:\r\n return False\r\n if 0 > pos[1] or pos[1] >= BOARD_SIZE:\r\n return False\r\n\r\n return True",
"def is_on_board(self, r, c):\r\n return 0 <= r <= 7 and 0 <= c <= 7",
"def is_winning(self):\n\n current_board = self.current_board\n\n # check rows\n for row in current_board:\n row = set(row)\n if (\"X\" not in row and \"-\" not in row) or (\"O\" not in row and \"-\" not in row):\n return True\n\n # check columns\n for i in range(len(current_board)):\n column_to_check = set()\n \n for j in range(len(current_board)):\n column_to_check.add(current_board[j][i])\n\n if (\"X\" not in column_to_check and \"-\" not in column_to_check) or (\"O\" not in column_to_check and \"-\" not in column_to_check):\n return True\n \n # check diagonals\n forward_diagonal_check = set()\n backward_diagonal_check = set()\n \n for i in range(len(current_board)):\n forward_diagonal_check.add(current_board[i][i])\n backward_diagonal_check.add(current_board[i][len(current_board)-1-i])\n\n if forward_diagonal_check == {\"X\"} or forward_diagonal_check == {\"O\"}:\n return True\n\n if backward_diagonal_check == {\"X\"} or backward_diagonal_check == {\"O\"}:\n return True",
"def on_board(self, pos):\n i, j = pos\n return 0 <= i < COLS and 0 <= j < ROWS",
"def is_on_board(x: int, y: int) -> bool:\n return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT",
"def win_game(board :list) -> bool:\n if board == win_state:\n return True\n return False",
"def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False",
"def validBoard():\r\n\r\n\tglobal move1, move2\r\n\r\n\tif move1==move2 or move1-move2==1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 0:\n return False\n # Game is draw, no more moves left!\n return True",
"def won(board):\n\n # grab position of car and position that needs to be checked\n list_pos = board.width * board.cars[0].y + board.cars[0].x\n checkpos = int(list_pos) + 2\n\n # check if redcar is directly in front of exit\n if list_pos == (board.width * board.exity + board.exitx):\n print(\"game has been won in {} turns with the following moves: {}.\".format(len(board.moves), board.moves))\n return True\n\n # checks coordinates between exit and redcar\n for x in range(checkpos, ((board.width * board.exity + board.exitx) + 2)):\n\n if board.coordinates[x][0] == True:\n break\n\n # checks if coordinate before exit is empty\n elif x == ((board.width * board.exity + board.exitx) + 1) and board.coordinates[x][0] == False:\n print(\"game has been won in {} turns with the following moves: {}.\".format(len(board.moves), board.moves))\n return True\n\n # return false if not won\n return False",
"def _check_board(self):\n return self.game_board.check_board(self.tetrino_set)",
"def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols",
"def winning_move(board, position, player):\n win = list(player*3)\n if get_row(board, position) == win:\n return True\n elif get_column(board, position) == win:\n return True\n elif position % 2 != 0:\n # odd positions are on the diagonals\n return get_diagonal(board, 1) == win or get_diagonal(board, 3) == win\n return False",
"def bornoff(self, board):\n res = False\n if (self.player):\n if (reduce(lambda x, y: x+y, board.p1vec) < reduce(lambda x, y: x+y, self.board.p1vec)):\n res = True\n else:\n if (reduce(lambda x, y: x+y, board.p2vec) < reduce(lambda x, y: x+y, self.board.p2vec)):\n res = True\n return res",
"def is_in_board(self):\n return self.is_alive()",
"def is_game_won(self) -> int:\n\n b = self.board\n for c1, c2, c3, c4 in _WINDOWS:\n if b[c1] and (b[c1] == b[c2] == b[c3] == b[c4]):\n print(\"win\", c1, c2, c3, c4)\n return b[c1]",
"def available_moves(self) -> bool:\n has_move = False\n for i in range(self.col):\n if self.valid_column(i):\n has_move = True\n return has_move",
"def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False",
"def __is_board_full(self):\r\n for row in self.__board:\r\n if {self.PLAYER1, self.PLAYER2} & set(row) != 0:\r\n return False\r\n return True",
"def can_move(board: Board, whites_turn: bool) -> bool:\n friendly_pieces = WHITE_PIECES if whites_turn else BLACK_PIECES\n temp_board = board\n\n for i, row in enumerate(board):\n for j, piece in enumerate(row):\n position = (i, j)\n\n if piece in friendly_pieces:\n new_positions = get_possible_moves(position, board)\n\n for new_pos in new_positions:\n temp_board = change_position(board, new_pos, piece)\n temp_board = clear_position(temp_board, position)\n\n if not is_in_check(temp_board, whites_turn):\n return True\n\n return False",
"def is_win(my_board):\n return np.count_nonzero(my_board == CLOSED) == NUM_MINES",
"def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False",
"def checkAll(self, player, board):\n #retrieve current moves of the player who made the last move\n currentMoves = self.getPlayerMoves(player,board)\n\n #check column win\n is_col_win = self.checkWin(currentMoves, self.columnWins)\n if is_col_win != False:\n return True\n\n #check row win\n is_row_win = self.checkWin(currentMoves, self.rowWins)\n if is_row_win != False:\n return True\n\n #check diagonal win\n is_diag_win = self.checkWin(currentMoves, self.diagonalWins)\n if is_diag_win != False:\n return True\n else:\n return False",
"def has_won(board, player):\n return False",
"def has_won(board, player):\r\n return False",
"def checkWin(self, board):\n for w in self.wins:\n if board[w[0]] != ' ' and (board[w[0]] == board[w[1]] == board[w[2]]):\n self.winner = board[w[0]]\n return True",
"def row_win(board):\n\tfor row in range(3):\n\t\tif board[row][0] != EMPTY and board[row][0] == board[row][1] == board[row][2]:\n\t\t\treturn True\n\treturn False",
"def process_move(player,board):\r\n\r\n \r\n print(str(player) + \"'s turn\") #shows which player's turn it is\r\n col = player.next_move(board)\r\n board.add_checker(player.checker,col) #adds checker to specific column\r\n print()\r\n print(board)\r\n print()\r\n if board.is_win_for(player.checker) == True:\r\n print(player, \"wins in\", player.num_moves,'moves.\\nCongratulations!')\r\n return True\r\n elif board.is_win_for(player.checker)== False and board.is_win_for(player.opponent_checker()) == False and board.is_full() == True:\r\n print(\"It's a tie!\")\r\n return True\r\n else:\r\n return False",
"def col_win(board):\n\tfor col in range(3):\n\t\tif board[0][col] != EMPTY and board[0][col] == board[1][col] == board[2][col]:\n\t\t\treturn True\n\treturn False",
"def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()"
]
| [
"0.702184",
"0.6970916",
"0.6834526",
"0.6800029",
"0.6766021",
"0.67560077",
"0.668015",
"0.65668744",
"0.6562663",
"0.65373456",
"0.65343285",
"0.65004635",
"0.64892584",
"0.6485119",
"0.64796084",
"0.64721423",
"0.6472055",
"0.6472033",
"0.6468038",
"0.6465691",
"0.6419337",
"0.6360592",
"0.63578326",
"0.6304014",
"0.6302154",
"0.6284906",
"0.6276378",
"0.627106",
"0.62675184",
"0.6264"
]
| 0.85112965 | 0 |
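
A minimal, runnable sketch of how the `on_board` check in the record above might sit inside its class. The constructor and attribute names are assumptions inferred from the method body — the record only shows the method itself — and bounds validation on construction is deliberately elided here:

```python
class Location:
    def __init__(self, rank, file):
        self._rank = rank   # row index; on an 8x8 board, 0..7 is on-board
        self._file = file   # column index; 0..7 is on-board

    def on_board(self):
        # Equivalent to the chained comparison in the record above
        return -1 < self._rank < 8 and -1 < self._file < 8


assert Location(0, 7).on_board()
assert not Location(8, 3).on_board()    # rank off the top edge
assert not Location(4, -1).on_board()   # file off the left edge
```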
Shifts in the direction provided by the ``Direction`` enum. | def shift(self, direction):
try:
if direction == Direction.UP:
return self.shift_up()
elif direction == Direction.DOWN:
return self.shift_down()
elif direction == Direction.RIGHT:
return self.shift_right()
elif direction == Direction.LEFT:
return self.shift_left()
else:
raise IndexError("Invalid direction {}".format(direction))
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])",
"def move(self, direction):\n pass",
"def shiftDir(self, direction, n):\n assert Direction.isDir(direction), \"incorrect type of arg direction: should be a Direction, is {}\".format(type(direction))\n assert isinstance(n, AxisDistance), 'incorrect type of arg n: should be type AxisDistance, is type {}'.format(type(n))\n direction = Direction(direction)\n self.x += direction.dx * n\n self.y += direction.dy * n\n return self",
"def move(self, direction):\r\n self.stored_direction = direction",
"def turn(self, dir):\n if dir.upper() == 'R':\n if self.direction == 3:\n self.direction = 0\n else:\n self.direction += 1\n if dir.upper() == 'L':\n if self.direction == 0:\n self.direction = 3\n else:\n self.direction -= 1",
"def move(self, direction: Direction) -> None:\n if direction == Direction.left and self.position > 0:\n self.position -= 1\n elif direction == Direction.right:\n self.position += 1",
"def turn(self, world, direction):\n world.__turn__(self.ID, int(direction))",
"def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)",
"def shift(self, *amounts):\n return shift(self, *amounts)",
"def move(self, direction=None):\n if direction == 'rotate':\n self.positions = self.simple_rotate()\n else:\n self.origin = self.get_new_origin(direction=direction)",
"def move(self, direction):\n # replace with your code\n pass",
"def move(self, direction):\n # replace with your code\n pass",
"def move_in_direction(self, direction):\n if direction == NORTH:\n self.__position[y] += 1\n elif direction == NORTHEAST:\n self.__position[x] += 1\n self.__position[y] += 1\n elif direction == EAST:\n self.__position[x] += 1\n elif direction == SOUTHEAST:\n self.__position[x] += 1\n self.__position[y] -= 1\n elif direction == SOUTH:\n self.__position[y] -= 1\n elif direction == SOUTHWEST:\n self.__position[x] -= 1\n self.__position[y] -= 1\n elif direction == WEST:\n self.__position[x] -= 1\n elif direction == NORTHWEST:\n self.__position[x] -= 1\n self.__position[y] += 1",
"def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)",
"def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1",
"def shift_column(self, coords, direction):\n self.shift_cells(self.get_column(coords, direction), direction)",
"def move(self, direction, cycles):\n\t\tpass",
"def MoveRightStep(self):\n if self.facing == 0:\n self.facing = 1\n self.x += self.stepLeft\n elif self.facing == 1:\n self.facing = 2\n self.y += self.stepUp\n elif self.facing == 2:\n self.facing = 3\n self.x -= self.stepRight\n elif self.facing == 3:\n self.facing = 0\n self.y -= self.stepDown",
"def move(self, direction, step=1.):\n if direction in ('up', 'down'):\n vector = self.up * (1. if direction == 'up' else -1.)\n elif direction in ('left', 'right'):\n vector = self.side * (1. if direction == 'right' else -1.)\n elif direction in ('forward', 'backward'):\n vector = self.direction * (1. if direction == 'forward' else -1.)\n else:\n raise ValueError('Unsupported direction: %s' % direction)\n\n self.position += step * vector",
"def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp",
"def shifted(self, shift):\n new_location = None if self.location is None else self.location + shift\n reference = None if self.reference is None else self.reference + shift\n return self.copy_with_changes(\n location=new_location, reference=reference, derived_from=self,\n )",
"def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]",
"def setDirection(self,stepDir = 2):\n pass",
"def move_fine(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n self._move(direction, count, 1)\n self.backlash_count += 1\n else:\n self._direction = direction\n self._move(direction, count, 1)",
"def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()",
"def rotate(self, direction):\n if type(direction) is not int:\n raise TypeError(\"direction should be int\")\n if direction != -1 and direction != 1:\n raise ValueError(\"direction should be (-1 or 1)\")\n size = self.size\n for i in range(size):\n for j in range(i, size):\n if direction == -1:\n self.grid[i][j], self.grid[size - 1 - j][i] = self.grid[size - 1 - j][i], self.grid[i][j]\n elif direction == 1:\n self.grid[i][j], self.grid[j][size - 1 - i] = self.grid[j][size - 1 - i], self.grid[i][j]",
"def steer(self, direction):\n\n if -1 <= direction <= 1:\n target_position = self.steering_limit * direction\n self.brick_pi.set_motor_position(\n self.motor_steer, -target_position)",
"def move(self, direction):\r\n dx = direction[0]\r\n dy = direction[1]\r\n\r\n self.head[0] += dx * 10\r\n self.head[1] += dy * 10\r\n\r\n for i in range(self.length - 1):\r\n self.body[i] = self.body[i + 1]\r\n self.body[-1] = copy.copy(self.head)",
"def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction",
"def move_ship(self,direction):\n assert isinstance(direction, str)\n assert direction == 'left' or direction == 'right'\n self.stay_on_screen()\n if direction == 'left':\n self.x -= SHIP_MOVEMENT\n elif direction == 'right':\n self.x += SHIP_MOVEMENT"
]
| [
"0.774642",
"0.692398",
"0.6842757",
"0.6679597",
"0.66020656",
"0.65728146",
"0.6467691",
"0.64622736",
"0.64192796",
"0.63760555",
"0.63722444",
"0.63722444",
"0.6337595",
"0.6268015",
"0.62238383",
"0.62154263",
"0.61994225",
"0.6179677",
"0.6145385",
"0.6136167",
"0.61124784",
"0.60972303",
"0.6082632",
"0.60198474",
"0.59963167",
"0.5977874",
"0.597644",
"0.5954213",
"0.5953427",
"0.5941206"
]
| 0.7574775 | 1 |
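
The `shift` document above dispatches with an if/elif chain over a `Direction` enum. Below is a hedged, table-driven equivalent; the enum's members and the `Square` stub are assumptions, since the record shows neither the enum's definition nor the `shift_*` methods:

```python
from enum import Enum

class Direction(Enum):
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3

class Square:
    # Hypothetical stand-in for the Location object the record assumes;
    # each shift_* method just reports which handler ran.
    def shift_up(self):    return "up"
    def shift_down(self):  return "down"
    def shift_left(self):  return "left"
    def shift_right(self): return "right"

def shift(location, direction):
    # Same dispatch as the if/elif chain, expressed as a lookup table.
    handlers = {
        Direction.UP: location.shift_up,
        Direction.DOWN: location.shift_down,
        Direction.LEFT: location.shift_left,
        Direction.RIGHT: location.shift_right,
    }
    try:
        return handlers[direction]()
    except KeyError:
        raise IndexError("Invalid direction {}".format(direction))

assert shift(Square(), Direction.UP) == "up"
```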
Finds Location shifted up by 1 | def shift_up(self, times=1):
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def locate(x, y):\n position(x * 6, y)",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def get_next_position(self):",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])",
"def test_findXCoordinateFromDirection_up(self):\n actual_result = rules.findXCoordinateFromDirection(1)\n expected_result = 0\n self.assertEqual(actual_result, expected_result)",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def middleUp(self):",
"def FindClosestInsertedPoint(self, ):\n ...",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def look(self):\n self.location.look()",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1",
"def tryout_new_location(self):\n try_location = [0, 0]\n \n # try locations until a not-occupied location is found and not all folds are checked\n while try_location in self.occupied:\n\n # folds north everytime\n current_type = 2\n \n # check if location is possible \n try_location = self.assign_location(current_type)\n\n # if location is not possible, try next fold\n if try_location in self.occupied:\n continue\n # if location is possible, use location\n else:\n self.next_location = try_location\n return",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]"
]
| [
"0.6270554",
"0.60757375",
"0.60245186",
"0.60240287",
"0.60180974",
"0.6000895",
"0.5944027",
"0.5933273",
"0.59240323",
"0.58775055",
"0.58667123",
"0.58546114",
"0.58382297",
"0.58361113",
"0.5834758",
"0.58302695",
"0.57927275",
"0.57927275",
"0.57927275",
"0.5768575",
"0.57489693",
"0.57281256",
"0.5722802",
"0.5701325",
"0.5692608",
"0.5662289",
"0.5659288",
"0.5633001",
"0.5616692",
"0.5609024"
]
| 0.6417926 | 0 |
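
Each `shift_*` document wraps `Location(...)` in try/except IndexError, which implies the constructor itself validates coordinates. A sketch of that assumed behaviour — the validation rule is borrowed from the `on_board` record earlier, not shown in the dataset:

```python
class Location:
    def __init__(self, rank, file):
        # Assumed validating constructor: reject off-board coordinates,
        # which is what lets shift_up's try/except catch an IndexError.
        if not (-1 < rank < 8 and -1 < file < 8):
            raise IndexError("off-board location ({}, {})".format(rank, file))
        self._rank, self._file = rank, file

    def shift_up(self, times=1):
        try:
            return Location(self._rank + times, self._file)
        except IndexError as e:
            raise IndexError(e)

try:
    Location(7, 0).shift_up()   # would leave the board at rank 8
except IndexError as exc:
    print("rejected:", exc)
```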
Finds Location shifted down by 1 | def shift_down(self, times=1):
try:
return Location(self._rank - times, self._file)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_next_position(self):",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def locate(x, y):\n position(x * 6, y)",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def backtrack_to_start(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n next_move = lis[-4:-2]\r\n\r\n return next_move",
"def test_findXCoordinateFromDirection_up(self):\n actual_result = rules.findXCoordinateFromDirection(1)\n expected_result = 0\n self.assertEqual(actual_result, expected_result)",
"def middleUp(self):",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def FindClosestInsertedPoint(self, ):\n ...",
"def backward_character():\r\n set_point(point().offset(-1))",
"def offset(point):\n x = (floor(point.x, 20) + 200) / 20\n y = (180 - floor(point.y, 20)) / 20\n index = int(x + y * 20)\n return index",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def look(self):\n self.location.look()"
]
| [
"0.61814827",
"0.6127964",
"0.6118851",
"0.6116896",
"0.61094475",
"0.60757875",
"0.60212976",
"0.60003173",
"0.5983816",
"0.5965933",
"0.5955165",
"0.59535277",
"0.59151417",
"0.5878911",
"0.5878911",
"0.5878911",
"0.5869491",
"0.5867789",
"0.5857768",
"0.5830655",
"0.5778268",
"0.5738394",
"0.5733788",
"0.57264614",
"0.571474",
"0.57057863",
"0.57021147",
"0.5698474",
"0.5695476",
"0.56728405"
]
| 0.6511327 | 0 |
Finds Location shifted right by 1 | def shift_right(self, times=1):
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def shift_left(self, times=1):\n try:\n return Location(self._rank, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def locate(x, y):\n position(x * 6, y)",
"def shifted(self, shift):\n new_location = None if self.location is None else self.location + shift\n reference = None if self.reference is None else self.reference + shift\n return self.copy_with_changes(\n location=new_location, reference=reference, derived_from=self,\n )",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def pos_right(self, x=1):\n\n self.x += x\n return self.pos(self.x, self.y)",
"def test_findXCoordinateFromDirection_left(self):\n actual_result = rules.findXCoordinateFromDirection(8)\n expected_result = -2\n self.assertEqual(actual_result, expected_result)",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def get_next_position(self):",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def _right(self, index):\r\n return 2*index + 2",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift"
]
| [
"0.6389681",
"0.62587845",
"0.6254561",
"0.6108448",
"0.6070218",
"0.6069615",
"0.60360247",
"0.60355735",
"0.5927134",
"0.58692807",
"0.58327454",
"0.5816636",
"0.58158803",
"0.5808757",
"0.5802878",
"0.5780936",
"0.57805306",
"0.57633096",
"0.574295",
"0.57252795",
"0.5681169",
"0.5676748",
"0.5644311",
"0.5621381",
"0.5620234",
"0.5612864",
"0.5612864",
"0.5612864",
"0.5590912",
"0.5563556"
]
| 0.64013916 | 0 |
Finds Location shifted left by 1 | def shift_left(self, times=1):
try:
return Location(self._rank, self._file - times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def left(self):\n x, y = (self.loc[0] - 1, self.loc[1])\n\n if x < 0:\n return None # None\n\n return self.garden.cells[y][x]",
"def _left(self, index):\r\n return 2*index + 1",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def test_findXCoordinateFromDirection_left(self):\n actual_result = rules.findXCoordinateFromDirection(8)\n expected_result = -2\n self.assertEqual(actual_result, expected_result)",
"def shift_right(self, times=1):\n try:\n return Location(self._rank, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_left(self):\n self.pointer = (self.pointer - 1) % len(self.data)",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def shifted(self, shift):\n new_location = None if self.location is None else self.location + shift\n reference = None if self.reference is None else self.reference + shift\n return self.copy_with_changes(\n location=new_location, reference=reference, derived_from=self,\n )",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def locate(x, y):\n position(x * 6, y)",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def pos_left(self, x=1):\n\n self.x -= x\n return self.pos(self.x, self.y)",
"def get_next_position(self):",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def left_steering(measurement):\n measurement = (measurement + CORRECTION_FACTOR)\n return measurement",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def test_first_pos() -> None:\n assert sw.walk_to(1) == sw.Coordinate(0, 0)"
]
| [
"0.6472017",
"0.6380348",
"0.6290659",
"0.62783647",
"0.6157998",
"0.6058475",
"0.6055237",
"0.60299575",
"0.59697723",
"0.5902902",
"0.5895752",
"0.58797586",
"0.5867567",
"0.5864666",
"0.5861478",
"0.58449423",
"0.5817147",
"0.5814789",
"0.5799366",
"0.5786945",
"0.5749039",
"0.5719677",
"0.57162035",
"0.5713624",
"0.5713624",
"0.5713624",
"0.5703334",
"0.5679345",
"0.5655313",
"0.5646834"
]
| 0.65139586 | 0 |
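
With the four straight shifts in hand (`shift_up`, `shift_down`, `shift_right`, `shift_left` above), compound moves fall out of chaining, since each call returns a fresh object. A runnable sketch with bounds checking elided; `Loc` is a stand-in name for illustration, not the dataset's class:

```python
class Loc:
    def __init__(self, rank, file):
        self.rank, self.file = rank, file

    def shift_up(self, times=1):    return Loc(self.rank + times, self.file)
    def shift_down(self, times=1):  return Loc(self.rank - times, self.file)
    def shift_right(self, times=1): return Loc(self.rank, self.file + times)
    def shift_left(self, times=1):  return Loc(self.rank, self.file - times)

start = Loc(1, 1)
knight = start.shift_up(2).shift_right(1)    # compound move by chaining
assert (knight.rank, knight.file) == (3, 2)
assert (start.rank, start.file) == (1, 1)    # the original is untouched
```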
Finds Location shifted up right by 1 | def shift_up_right(self, times=1):
try:
return Location(self._rank + times, self._file + times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def shift_right(self, times=1):\n try:\n return Location(self._rank, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def locate(x, y):\n position(x * 6, y)",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def test_findXCoordinateFromDirection_up(self):\n actual_result = rules.findXCoordinateFromDirection(1)\n expected_result = 0\n self.assertEqual(actual_result, expected_result)",
"def get_next_position(self):",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def middleUp(self):",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))",
"def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def shift_left(self, times=1):\n try:\n return Location(self._rank, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def final_coord(steps):\n return reduce(add, steps, (0, 0))",
"def look(self):\n self.location.look()"
]
| [
"0.6286779",
"0.6285358",
"0.62705064",
"0.62500006",
"0.61474323",
"0.61086285",
"0.6047894",
"0.6024152",
"0.6012993",
"0.59905976",
"0.59783375",
"0.59731233",
"0.59724426",
"0.59681296",
"0.5835306",
"0.5796514",
"0.57926786",
"0.5772637",
"0.5772487",
"0.5758451",
"0.5758451",
"0.5758451",
"0.57482475",
"0.57346183",
"0.5646252",
"0.5634549",
"0.5604262",
"0.560294",
"0.5591409",
"0.5554958"
]
| 0.6357913 | 0 |
Finds Location shifted up left by 1 | def shift_up_left(self, times=1):
try:
return Location(self._rank + times, self._file - times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def shift_left(self, times=1):\n try:\n return Location(self._rank, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def locate(x, y):\n position(x * 6, y)",
"def get_next_position(self):",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def shift_right(self, times=1):\n try:\n return Location(self._rank, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def middleUp(self):",
"def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)",
"def OldStartingIndex(self) -> int:",
"def test_findXCoordinateFromDirection_up(self):\n actual_result = rules.findXCoordinateFromDirection(1)\n expected_result = 0\n self.assertEqual(actual_result, expected_result)",
"def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])",
"def get_start_plus_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n start_plus_coordinate = (self.__location[0] + 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n start_plus_coordinate = (self.__location[0],\r\n self.__location[1] + 1)\r\n return start_plus_coordinate",
"def get(self):\n return self.x-self.offset"
]
| [
"0.63747233",
"0.6306525",
"0.62983286",
"0.6265653",
"0.620307",
"0.6142705",
"0.6125544",
"0.61203617",
"0.6104564",
"0.60243714",
"0.6014703",
"0.60107845",
"0.59468216",
"0.5945145",
"0.59078413",
"0.5889049",
"0.5889049",
"0.5889049",
"0.58886904",
"0.58095354",
"0.58086175",
"0.5769504",
"0.57638216",
"0.5731447",
"0.56851524",
"0.5679137",
"0.5675711",
"0.56589264",
"0.56516767",
"0.564073"
]
| 0.6393994 | 0 |
Finds Location shifted down right by 1 | def shift_down_right(self, times=1):
try:
return Location(self._rank - times, self._file + times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_down_left(self, times=1):\n try:\n return Location(self._rank - times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def shift_right(self, times=1):\n try:\n return Location(self._rank, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def get_next_position(self):",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def locate(x, y):\n position(x * 6, y)",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def test_findXCoordinateFromDirection_up(self):\n actual_result = rules.findXCoordinateFromDirection(1)\n expected_result = 0\n self.assertEqual(actual_result, expected_result)",
"def middleUp(self):",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def rightUp(self):",
"def backward_character():\r\n set_point(point().offset(-1))",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def modpos(pos,L,move):\n pos += move\n if pos == L: #moved off right or bottom\n return(0)\n if pos == -1:#moved off top or left\n return(L-1)\n return(pos) #in the middle",
"def at(board, pos):\n return (board >> pos-1) & 1",
"def right_distance(self):\n return self.board.length - 1 - self.x",
"def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))"
]
| [
"0.6554093",
"0.648883",
"0.64419365",
"0.6361129",
"0.6267717",
"0.6208789",
"0.6115083",
"0.61124676",
"0.6071752",
"0.6006999",
"0.600069",
"0.5999654",
"0.5944346",
"0.5940241",
"0.5940005",
"0.58752936",
"0.5782369",
"0.5772367",
"0.5767407",
"0.57642084",
"0.5757818",
"0.57338876",
"0.5729367",
"0.5717747",
"0.5717747",
"0.5717747",
"0.5666268",
"0.564817",
"0.5642892",
"0.5628258"
]
| 0.65386313 | 1 |
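The shift helpers in this record assume a Location value object indexed by rank and file, with bounds enforcement left to the constructor (which the record never shows). A minimal sketch under that assumption — the 8x8 limits and attribute names here are illustrative, not the dataset's real class:

class Location:
    # Illustrative coordinate class; not the actual implementation behind the record.
    def __init__(self, rank, file):
        # Assumption: an 8x8 board with zero-based rank/file indices.
        if not (0 <= rank < 8 and 0 <= file < 8):
            raise IndexError("({}, {}) is off the board".format(rank, file))
        self._rank = rank
        self._file = file

    def shift_down_right(self, times=1):
        # One rank down and one file right per step, as in the document above.
        return Location(self._rank - times, self._file + times)

moved = Location(4, 4).shift_down_right(2)
assert (moved._rank, moved._file) == (2, 6)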
Finds the Location shifted down-left by 1 | def shift_down_left(self, times=1):
try:
return Location(self._rank - times, self._file - times)
except IndexError as e:
raise IndexError(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def downleft(self):\n return Coord([self.x - 1, self.y + 1])",
"def upleft(self):\n return Coord([self.x - 1, self.y - 1])",
"def shift_down(self, times=1):\n try:\n return Location(self._rank - times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def downright(self):\n return Coord([self.x + 1, self.y + 1])",
"def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None",
"def shift_down_right(self, times=1):\n try:\n return Location(self._rank - times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up_left(self, times=1):\n try:\n return Location(self._rank + times, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def upright(self):\n return Coord([self.x + 1, self.y - 1])",
"def get_next_position(self):",
"def locate(x, y):\n position(x * 6, y)",
"def test_jump_to_location_shift(self, cpu):\n for shift in range(0x0, 0xFFF):\n cpu.opcode = 0xB000 | shift\n cpu.jump_to_location_shift()\n cpu.program_counter += 2\n assert(cpu.program_counter == cpu.V_register[0] + shift)",
"def shift_left(self, times=1):\n try:\n return Location(self._rank, self._file - times)\n except IndexError as e:\n raise IndexError(e)",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def shift_up_right(self, times=1):\n try:\n return Location(self._rank + times, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def shift_up(self, times=1):\n try:\n return Location(self._rank + times, self._file)\n except IndexError as e:\n raise IndexError(e)",
"def last_pos(self):\n return self.locs[self.indices[-1], 2:4]",
"def test_findXCoordinateFromDirection_left(self):\n actual_result = rules.findXCoordinateFromDirection(8)\n expected_result = -2\n self.assertEqual(actual_result, expected_result)",
"def get_location(self, currentlocation, action):\n\t\t# Retrieve movement tuple from dictionary\n\t\tmovement = self.actions_dict[action]\n\t\t# Get new location using modulo of gridsize\n\t\tnewlocation = ((currentlocation[0]+movement[0]) % self.gridsize[0], (currentlocation[1]+movement[1]) % self.gridsize[1])\n\n\t\treturn newlocation",
"def _change_offset(self, value, direction):\n if direction == 1:\n if value >= self.POSITIONS-1:\n value = 0\n else:\n value += 1\n elif direction == -1:\n if value == 0:\n value = self.POSITIONS-1\n else:\n value -= 1\n return value",
"def left(self):\n x, y = (self.loc[0] - 1, self.loc[1])\n\n if x < 0:\n return None # None\n\n return self.garden.cells[y][x]",
"def modpos(pos,L,move):\n pos += move\n if pos == L: #moved off right or bottom\n return(0)\n if pos == -1:#moved off top or left\n return(L-1)\n return(pos) #in the middle",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def Offset(self) -> int:",
"def shift_right(self, times=1):\n try:\n return Location(self._rank, self._file + times)\n except IndexError as e:\n raise IndexError(e)",
"def get(self):\n return self.x-self.offset",
"def _left(self, index):\r\n return 2*index + 1",
"def move_toward(state, location):\n return move_relative(state, location, True)",
"def nextPositionOffset(self):\n if self.dir == \"N\":\n return (0, -1)\n elif self.dir == \"S\":\n return (0, 1)\n elif self.dir == \"E\":\n return (1, 0)\n elif self.dir == \"W\":\n return (-1, 0)\n else:\n raise TypeError(\"invalid direction '%s'\" % self.dir)",
"def middleUp(self):"
]
| [
"0.6744617",
"0.6414325",
"0.6344078",
"0.62217546",
"0.62195355",
"0.61563265",
"0.61307687",
"0.61189425",
"0.6075411",
"0.60592055",
"0.60378623",
"0.6034514",
"0.59856564",
"0.59467053",
"0.5927173",
"0.58799267",
"0.58688897",
"0.58650124",
"0.58649886",
"0.5841106",
"0.5812556",
"0.5804705",
"0.5804705",
"0.5804705",
"0.58025813",
"0.5786231",
"0.57832134",
"0.5781996",
"0.5743178",
"0.5731922"
]
| 0.65283656 | 1 |
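The down-left variant differs only in the sign of the file offset; the interesting behaviour is the IndexError raised at the board edge. A standalone functional check of that edge case, assuming the same zero-based 8x8 convention as the sketch above:

def shift_down_left(rank, file, times=1):
    # Functional form of the same move, for a quick bounds check.
    new_rank, new_file = rank - times, file - times
    if not (0 <= new_rank < 8 and 0 <= new_file < 8):
        raise IndexError("({}, {}) is off the board".format(new_rank, new_file))
    return new_rank, new_file

try:
    shift_down_left(0, 0)  # target (-1, -1): off the board
except IndexError as err:
    print("rejected:", err)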
Return the shopping cart detail page. | def cart_detail(request):
cart = Cart(request)
return render(request, 'cart/cart.html', {'cart': cart}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detail(request):\n # del request.session['cart_id']\n # del request.session['total_in_cart']\n data = {}\n if (cart_id := request.session.get('cart_id', None)):\n cart = Cart.objects.get(pk=cart_id)\n data['products_in_cart'] = cart.cartitems.all()\n data['total_price'] = cart.cart_price\n\n return render(request, 'cart/details.html', data)",
"def get(self,request):\r\n try:\r\n if request.user.is_authenticated():\r\n cart = self.cart_obj.get_cart_by_user(request.user)\r\n else:\r\n cart = self.cart_obj.get_cart_by_id(request.session.get('cart_id',None))\r\n \r\n if not cart:\r\n self.context['no_items'] = True\r\n return render(request, 'cart.html', self.context)\r\n request.session['cart_id'] = cart.first().id\r\n cart_details_list =[]\r\n if cart:\r\n cart_details = self.cart_det_obj.get_cart_items(cart.first().id) \r\n \"\"\" \r\n :Note If face any issue with cart order by cartid and get the latest cartid.\r\n \"\"\"\r\n for cart in cart_details:\r\n product = Product.objects.filter(id=cart.product_id)\r\n cart_temp_dict = {}\r\n cart_temp_dict['product'] = product.first()\r\n cart_temp_dict['quantity'] = cart.quantity\r\n cart_temp_dict['price'] = product.first().price\r\n cart_temp_dict[cart.id] = cart.id\r\n cart_details_list.append(cart_temp_dict)\r\n \r\n self.context['cart_details'] = cart_details_list\r\n self.context['cart_count'] = cart_details.count()\r\n response = render(request, 'cart.html', self.context)\r\n return response\r\n except:\r\n print(\"500\")\r\n raise Exception",
"def cart_detail(request, pk):\n data = request.data\n try:\n user = validations_utils.user_validation(pk) # Validates if user exists or not.\n token_user_id = validations_utils.user_token_validation(\n request.auth.user_id, pk) # Validates user's Token authentication.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n\n if request.method == 'GET':\n if Cart.objects.filter(user_id=user.id).exists(): # Checks if product_category exists with given id.\n cart_items = Cart.objects.filter(user_id=user.id)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_404_NOT_FOUND)\n if cart_items:\n cart_serializer = CartSerializer(cart_items, many=True)\n cart_data = cart_serializer.data\n data = []\n for obj in cart_data:\n x = utils.get_item_id(obj)\n item = validations_utils.item_validation(int(x))\n obj['name'] = item.name\n data.append(obj)\n return Response(data, status=status.HTTP_200_OK)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_204_NO_CONTENT)",
"def cart(request):\n return {'cart': get_cart_from_request(request)}",
"def api_display_cart():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tcart = cur.execute('SELECT * FROM cart;').fetchall()\r\n\tcart.append(cur.execute('SELECT SUM(price) from cart;').fetchone())\r\n\treturn jsonify(cart)",
"def get_cart_product():\n email = session.get('email')\n if email:\n customer = db.session.query(Customer).filter(Customer.email == email).first()\n cart = db.session.query(Cart).filter(Cart.customer_id == customer.customer_id).all() \n products = {}\n\n for product in cart:\n products[product.product.name] = product.quantity\n return jsonify(products)\n\n else:\n return redirect('/')",
"def cart_detail(request):\n cart = Cart(request)\n # Allow user to change the quantity from the details page.\n for item in cart:\n # Remember that a cart is stored as a dictionary in the user's session.\n # Here, we're adding a new key/value pair to the cart.\n # Create an instance of CartAddProductForm for each item in the cart to\n # allow changing product quantities. Initialize the form with the current\n # item quantity and set the update field to True so that when we submit the\n # form to the cart_add view, the current quantity is replaced with the new\n # one.\n # I DON'T QUITE UNDERSTAND WHAT THIS CODE IS DOING.\n item['update_quantity_form'] = CartAddProductForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n coupon_apply_form = CouponApplyForm()\n return render(request, 'cart/detail.html', {'cart': cart, 'coupon_apply_form': coupon_apply_form})",
"def get_cart_contents(db):",
"def view_cart(request):\n \n return render(request, \"cart.html\" )",
"def display(auth_context):\n\n cart = carts.get_cart(auth_context.get('uid'))\n for item in cart:\n product = product_catalog.get_product(item.item_id)\n item.info = product\n\n return render_template('cart.html',\n cart=cart,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def get_cart(id):\n url = carts_service_host + '/cart/' + id\n cart = requests.get(url).json()\n total = self._get_cart_total(cart['items'])\n return (jsonify(dict(total=total, cart=cart)),\n 200)",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"def view_cart(request):\n\n return render(request, 'cart/cart.html')",
"def test_shoppingcart_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then performing detail\n self._detail_model(\"shoppingcart\", self.shoppingcart_data, id, [\"quantity\", \"discount_value\", \"is_closed\"])\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def view_cart(request):\n return render(request, \"cart.html\")",
"def cart_detail(request):\n assert isinstance(request, HttpRequest)\n\n if request.method == \"POST\":\n cart_service.remove_from_cart(request)\n\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )\n else:\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )",
"def view_cart(request):\n categories = all_categories()\n productTypes = all_productTypes()\n return render(request, \"cart.html\", {\"categories\": categories,\n \"productTypes\": productTypes})",
"def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total_cart = 0\n item_count = 0\n partial_value = []\n\n for item in cart:\n if item == 'car':\n id = cart['car']['item_id']\n quantity = cart['car']['quantity']\n instance = Car\n item_type = 'car'\n elif item == 'track_day':\n id = cart['track_day']['item_id']\n quantity = cart['track_day']['quantity']\n instance = TrackDayAddon\n item_type = 'track_day'\n elif item == 'insurance':\n id = cart['insurance']['item_id']\n quantity = cart['insurance']['quantity']\n instance = InsuranceAddon\n item_type = 'insurance'\n elif item == 'private_driver':\n id = cart['private_driver']['item_id']\n quantity = cart['private_driver']['quantity']\n instance = PrivateDriverAddon\n item_type = 'private_driver'\n\n item = get_object_or_404(instance, pk=id)\n total_cart += quantity * item.price\n item_total = quantity * item.price\n item_count += 1\n\n partial_value.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'item_total': item_total\n })\n cart_items.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'quantity': quantity,\n })\n\n return {'cart_items': cart_items, 'partial_value': partial_value,\n 'total_cart': total_cart, 'item_count': item_count}",
"def test_get_cart(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', True)\n self.assertEqual(self.cart_item_manager.get_cart(user_id, cart_id),\n self.dynamo_accessor.get_item(config.dynamo_cart_table_name,\n keys={'UserId': user_id, 'CartId': cart_id}))",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id = get_cart_id_session(request))",
"def getCartDetails():\n try:\n result = json.loads(request.get_data(as_text=True))\n userId = request.json['userId']\n print(\"user id is:\"+userId)\n items = myCart.find({\"userId\":userId})\n data = dumps(items)\n print(str(items))\n stats = myCart.aggregate(\n [\n #{ \"$match\" : { \"userId\" : \"88041fab-078c-4e34-8f03-1dadbe1c537a\"} },\n { \"$match\" : { \"userId\" : userId} },\n { \"$group\": \n { \n \"_id\": { \"userId\": \"$userId\" },\n \"totalAmount\": \n { \"$sum\": \n { \"$multiply\": [ \"$price\", \"$quantity\" ] }\n },\n \"totalQuantity\": { \"$sum\": \"$quantity\" } }\n }\n \n ]\n )\n statistics = dumps(stats)\n return jsonify({\"Status\" : \"OK\", \"data\" : data, \"stats\":statistics})\n except Exception, e:\n return jsonify(status='ERROR',message=str(e))",
"def shopping_cart(request, movie_id=None):\n cart = request.session.get('cart', [])\n tickets = Tickets.objects.filter(id__in=cart)\n context = {\n 'tickets': tickets,\n 'cart': cart\n }\n\n return render(request, 'shopping_cart.html', context)",
"def get_items_by_cart_page(request):\n items = Item.get_items_by_cart(request.GET['cart_id'])\n items = models_to_json(items)\n return JsonResponse({'items': items})",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def get_cart_items(request):\n return CartItem.objects.filter(cart_id=_cart_id(request))",
"def getCartDetailsForUser():\n try:\n result = json.loads(request.get_data(as_text=True))\n userId = request.json['userId']\n print(\"In get of shopping cart, user id :\"+userId)\n items = myCart.find({\"userId\":userId},{\"_id\":0})\n data = dumps(items)\n print(str(items))\n stats = myCart.aggregate(\n [\n { \"$match\" : { \"userId\" : userId} },\n { \"$group\": \n { \n \"_id\": { \"userId\": \"$userId\" },\n \"totalAmount\": \n { \"$sum\": \n { \"$multiply\": [ \"$price\", \"$quantity\" ] }\n },\n \"totalQuantity\": { \"$sum\": \"$quantity\" } }\n }\n \n ]\n )\n statistics = dumps(stats)\n return jsonify({\"Status\" : \"OK\", \"data\" : data, \"stats\":statistics})\n except Exception, e:\n return jsonify(status='ERROR',message=str(e),userId=userId)",
"def parse_cart_page(self, id, body):\n info = {}\n if self.__re_search(body, *self.regx['cart_unavailable']):\n return None\n\n body = body[body.find(id):]\n info['price'], p = self.__re_search_item_pos(body,\n *self.regx['cart_price'])\n b2 = body[:p]\n info['original'] = self.__re_search(b2, *self.regx['cart_original'])\n info['save'] = self.__re_search(b2, *self.regx['cart_save'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n info['shipping'] = self.__re_search(body, *self.regx['cart_shipping'])\n return info",
"def goto_cart(self):\n self.driver.find_element(*BasePageLocators.GO_CART).click()\n return CartPage(self.driver)"
]
| [
"0.80897045",
"0.74780965",
"0.74587524",
"0.7386842",
"0.73347443",
"0.7275893",
"0.7201817",
"0.71161604",
"0.7087134",
"0.68783367",
"0.68732184",
"0.6833306",
"0.6833306",
"0.68280244",
"0.6793842",
"0.6793842",
"0.6793842",
"0.6753704",
"0.6749037",
"0.67163604",
"0.66729295",
"0.6641576",
"0.66327816",
"0.6630418",
"0.66268444",
"0.66197324",
"0.66197324",
"0.65987915",
"0.6598123",
"0.65577245"
]
| 0.79108906 | 1 |
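The view in this record leans on a session-backed Cart helper that the record does not include. One plausible shape for it, sketched with a fake request so the snippet runs outside Django — the session key and item layout are assumptions:

class Cart:
    SESSION_KEY = "cart"  # assumed session key

    def __init__(self, request):
        self.session = request.session
        self.items = self.session.setdefault(self.SESSION_KEY, {})

    def add(self, product_id, quantity=1):
        entry = self.items.setdefault(str(product_id), {"quantity": 0})
        entry["quantity"] += quantity
        self.session.modified = True  # ensure the session change is persisted

    def __iter__(self):
        return iter(self.items.values())

class FakeSession(dict):
    modified = False

class FakeRequest:
    def __init__(self):
        self.session = FakeSession()

cart = Cart(FakeRequest())
cart.add(7, quantity=2)
assert next(iter(cart))["quantity"] == 2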
Load the indexes listed in this dataset's image set file. | def _load_image_set_index(self):
image_index = []
image_set_file = self.data_dir \
+ "/ImageSets/{}.txt".format(self.mode)
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file, 'r') as f:
for line in f.readlines():
image_index.append(line.strip())
return image_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path,\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.rstrip('\\n') for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n # Example path to image set file:\n # self._data_path + /ImageSets/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self.cfg.file_path, 'ImageSets', 'Main',\n self.cfg.train_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n\n with open(image_set_file) as f:\n image_index = []\n for x in f.readlines():\n xdata = x.strip().split(' ')\n if len(xdata) == 1 or xdata[-1] == '1':\n image_index.append(xdata[0])\n # image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def _load_image_set_index(self):\n image_index = self._load_annotations().keys()\n return image_index",
"def _load_image_set_index(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n image_index = [x.strip().split()[0] for x in f.readlines()]\n # \n return image_index",
"def _load_image_set_index(_year, _image_set):\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n _devkit_path = os.path.join(cfg.DATA_DIR, 'VOCdevkit' + _year)\n _data_path = os.path.join(_devkit_path, 'VOC' + _year)\n image_set_file = os.path.join(_data_path, 'ImageSets', 'Main',\n _image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index",
"def load_images(self, list_indices=None, start=None, end=None):\n if start is None and list_indices is None:\n start = 0\n if end is None and list_indices is None:\n end = len(self._image_names)\n if list_indices is None:\n assert start >= 0\n assert start < end\n assert end <= len(self._image_names)\n list_indices = np.arange(start, end)\n\n self.image_indices = []\n self.images = []\n self.image_names = []\n for i, image_name in enumerate(self._image_names):\n if i in list_indices:\n image_filename = os.path.join(self.directory, image_name)\n image = skio.imread(image_filename)\n self.image_indices.append(i)\n self.images.append(image)\n self.image_names.append(image_name)\n print(len(self.images), 'images loaded!')",
"def _load_image_set_index(self, shuffle):\n\n print 'preparing [%s] data (reading xml annotations)...' % self.image_set\n\n # retrieve all annotation files for every image in annotation_path\n annotation_full_set = [os.path.basename(xml_path)\n for xml_path in sorted(glob.glob('%s/*.xml' % self.annotation_path))]\n annotation_set = []\n if self.image_set == 'train': # set00 ~ set04 are for training\n for img_name in annotation_full_set:\n set_id = int(img_name.split('_')[0].strip('set'))\n #if set_id < 5:\n if set_id < 1:\n annotation_set.append(img_name)\n elif self.image_set == 'val':\n for img_name in annotation_full_set:\n set_id = int(img_name.split('_')[0].strip('set'))\n #if set_id == 5:\n if set_id == 1:\n annotation_set.append(img_name)\n elif self.image_set == 'trainval': # set00 ~ set05 are for training + val\n for img_name in annotation_full_set:\n set_id = int(img_name.split('_')[0].strip('set'))\n #if set_id <= 5:\n if set_id <= 1:\n annotation_set.append(img_name)\n elif self.image_set == 'test':\n for img_name in annotation_full_set:\n set_id = int(img_name.split('_')[0].strip('set'))\n #if set_id > 5:\n if set_id > 1:\n annotation_set.append(img_name)\n else:\n raise NotImplementedError, \"check if self.image_set is either \" \\\n \"train, val, trainval, or test. \" + \\\n self.image_set + \" not supported\"\n\n if shuffle:\n print 'shuffling data as asked...'\n np.random.shuffle(annotation_set)\n\n print 'preparing [%s] data (reading xml annotations)...totally %d...Done!' % (self.image_set, len(annotation_set))\n\n return annotation_set",
"def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)",
"def load(self, image_loader):\n self._image_loader = image_loader\n for tile_set in self.tile_sets:\n # do images first, because tiles could reference it\n for img in tile_set.images:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n tile_set.indexed_images[img.id] = self._load_image(img)\n # tiles\n for tile in tile_set.tiles:\n for img in tile.images:\n if not img.content and not img.source:\n # only image id set\n indexed_img = tile_set.indexed_images[img.id]\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)\n else:\n if img.source:\n self._load_image_from_source(tile_set, img)\n else:\n indexed_img = self._load_image(img)\n self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)",
"def load_data(self, from_idx):\n length = len(self.filenames)\n # we assume all images have the same dimensions\n shape = cv2.imread(filenames[0], int(self.color)).shape\n if not self.color:\n shape += (1,) # add additionnal channel for black and white\n X = []\n for f in tqdm(self.filenames[:5000]):\n if psutil.virtual_memory()[2] >= 60.0:\n break # preserve memory\n img = cv2.imread(f, int(self.color))\n if img is not None:\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n # change range of image to [-1, 1]\n # TODO : different procedure for colored images\n if not self.color:\n img = img.astype('float32')\n mx = np.max(img)\n mn = np.min(img)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n else:\n mx = np.amax(np.amax(img, axis=0), axis=0)\n mn = np.amin(np.amin(img, axis=0), axis=0)\n m = mx/2 + mn/2\n r = mx/2 - mn/2\n if np.all(r):\n img = (img - m)/r # works in both cases\n # add to dataset\n X.append(img)\n self.X = np.array(X)",
"def _load_split_indices(self):\n split_file = self.SPLITS.get(self.split)\n indices_file = self._filepath(split_file)\n\n with open(indices_file) as txt_file:\n idx_data = [int(i) for i in txt_file.readline().split()]\n\n return idx_data",
"def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels",
"def load_data_list(self):\n\n data = mat4py.loadmat(self.ann_file)['images']\n names = data['name']\n labels = data['class']\n parts = data['set']\n num = len(names)\n assert num == len(labels) == len(parts), 'get error ann file'\n\n if self.split == 'train':\n target_set = {1}\n elif self.split == 'val':\n target_set = {2}\n elif self.split == 'test':\n target_set = {3}\n else:\n target_set = {1, 2}\n\n data_list = []\n for i in range(num):\n if parts[i] in target_set:\n img_name = names[i]\n img_path = self.backend.join_path(self.img_prefix, img_name)\n gt_label = labels[i] - 1\n info = dict(img_path=img_path, gt_label=gt_label)\n data_list.append(info)\n\n return data_list",
"def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n class_name = root.split('/')[-1]\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset,\n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n \n \n for root, folders, files in os.walk(DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n alphabet = root.split('/')[-2]\n class_name = '{}.{}'.format(alphabet, root.split('/')[-1])\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset, \n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets",
"def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))",
"def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)",
"def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True",
"def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)",
"def load_mnist(kind='train'):\r\n with open('%s-labels.idx1-ubyte' % kind, 'rb') as lbpath:\r\n magic, n = struct.unpack('>II', lbpath.read(8))\r\n labels = np.fromfile(lbpath, dtype=np.uint8)\r\n\r\n with open('%s-images.idx3-ubyte' % kind, 'rb') as imgpath:\r\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\r\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n return images, labels",
"def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices",
"def _index_data(self, raw_data):\n self._all_class_images = collections.OrderedDict()\n self._image_embedding = collections.OrderedDict()\n for i, k in enumerate(raw_data[\"keys\"]):\n _, class_label, image_file = k.split(\"-\")\n image_file_class_label = image_file.split(\"_\")[0]\n assert class_label == image_file_class_label\n self._image_embedding[image_file] = raw_data[\"embeddings\"][i]\n if class_label not in self._all_class_images:\n self._all_class_images[class_label] = []\n self._all_class_images[class_label].append(image_file)\n\n self._check_data_index(raw_data)\n\n self._all_class_images = collections.OrderedDict([\n (k, np.array(v)) for k, v in six.iteritems(self._all_class_images)\n ])\n if self._verbose:\n tf.logging.info(str([len(raw_data), len(self._all_class_images),\n len(self._image_embedding)]))",
"def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels"
]
| [
"0.8110825",
"0.80748975",
"0.80505925",
"0.80249274",
"0.80228895",
"0.7933334",
"0.76450604",
"0.74506855",
"0.69827074",
"0.6790647",
"0.66879445",
"0.65021986",
"0.65021986",
"0.64863884",
"0.6448904",
"0.63080627",
"0.62396497",
"0.6179855",
"0.6103942",
"0.60800034",
"0.6065138",
"0.6039169",
"0.6012081",
"0.59450346",
"0.59409714",
"0.5931126",
"0.59297854",
"0.5928013",
"0.5917479",
"0.590687"
]
| 0.8145496 | 0 |
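The loader in this record is plain line-per-id file parsing. A self-contained re-run of the same pattern against a temporary file, so the strip-and-collect behaviour can be verified without the dataset tree:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "train.txt")
    with open(path, "w") as f:
        f.write("000001\n000002\n000003\n")

    assert os.path.exists(path), "Path does not exist: {}".format(path)
    with open(path) as f:
        image_index = [line.strip() for line in f.readlines()]

print(image_index)  # ['000001', '000002', '000003']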
Calls the `to_pipeline` method on itself. | def pipeline(self) -> Pipeline:
if self._to_pipeline is None:
raise AttributeError(
"pipeline not available because `to_pipeline` was not set on __init__."
)
return self._to_pipeline(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)",
"def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline",
"def pipeline(self):\n return self._pipeline",
"def pipeline(self):\n return self._pipeline",
"def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self",
"def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))",
"def transform(self, cfg_pipeline):\n return",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def run(self, pipeline: pipeline_pb2.Pipeline) -> Optional[Any]:\n pass",
"def run(self):\n self.set_pipeline()\n self.pipeline.fit(self.X, self.y)",
"def process(obj, rv, logfile, verbose):\n _end_branch(obj)\n\n logger.info('creating pipeline...')\n stdin = click.get_text_stream('stdin')\n stdout = click.get_text_stream('stdout')\n\n def write(item):\n stdout.write(str(item))\n stdout.write('\\n')\n\n pl = pipeline.create(obj['pipeline'], output=write)\n\n logger.info('processing...')\n logger.info('----------------------------------------')\n try:\n pl.run(stdin)\n except Exception as e:\n logger.error(f'abort: {type(e).__name__}: {e}')\n raise click.Abort from e\n logger.info('----------------------------------------')\n logger.info('DONE.')",
"def pipeline_artifact(self):\n pass",
"def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline",
"def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)",
"def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()",
"def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)",
"def _on_pipeline_init(self) -> None:\n pass",
"def _pipeline(self):\n try:\n b = self._pipeline_cache\n except AttributeError:\n r = open_redis_connection()\n b = self._pipeline_cache = r.pipeline()\n return b",
"def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None",
"def piped(self):\n\t\tpass",
"def append(self, pipeline):\n for stage in pipeline.pipe:\n self._pipe.append(stage)\n return self",
"def _init_pipeline(self, cfg: ConfigType) -> Callable:",
"def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline",
"def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)",
"def transform(self, *args, **kwargs):\n raise NotImplementedError",
"def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError",
"def __call__(self):\n self.tree = etree.parse(self.src)\n\n agent = transformer_factory(self.tree, self.options)\n self.tree = agent.transform()\n\n # Write out the finished product\n file = self._targetFile()\n self.tree.write(file, pretty_print=False)\n print 'wrote transformed channel:', file.name",
"def pipelines(self):\r\n return pipelines.Pipelines(self)",
"def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe",
"def fit(self, ts: TSDataset) -> \"Pipeline\":\n self.ts = ts\n self.ts.fit_transform(self.transforms)\n self.model.fit(self.ts)\n return self"
]
| [
"0.6798451",
"0.6618348",
"0.65910554",
"0.65910554",
"0.6581963",
"0.6552421",
"0.641886",
"0.6402468",
"0.63573986",
"0.63514197",
"0.6336858",
"0.62606335",
"0.62050164",
"0.6182921",
"0.6109646",
"0.60787505",
"0.6045552",
"0.60202897",
"0.6011419",
"0.6011088",
"0.6007795",
"0.58601063",
"0.5853776",
"0.58120096",
"0.5797448",
"0.57743484",
"0.5773251",
"0.57475185",
"0.5747338",
"0.5747201"
]
| 0.7730287 | 0 |
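The property in this record is a callback hook: the owner injects a converter at construction time and the property applies it lazily, failing loudly when nothing was injected. A reduced sketch of that wiring — the constructor signature is an assumption; only the property body mirrors the record:

class Individual:
    def __init__(self, main_node, to_pipeline=None):
        self.main_node = main_node
        self._to_pipeline = to_pipeline  # converter injected by the owner

    @property
    def pipeline(self):
        if self._to_pipeline is None:
            raise AttributeError(
                "pipeline not available because `to_pipeline` was not set on __init__."
            )
        return self._to_pipeline(self)

ind = Individual("GaussianNB(data)", to_pipeline=lambda i: "Pipeline[{}]".format(i.main_node))
print(ind.pipeline)  # Pipeline[GaussianNB(data)]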
Lists all primitive nodes, starting with the Individual's main node. | def primitives(self) -> List[PrimitiveNode]:
primitives = [self.main_node]
current_node = self.main_node._data_node
while isinstance(current_node, PrimitiveNode): # i.e. not DATA_TERMINAL
primitives.append(current_node)
current_node = current_node._data_node
return primitives | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_nodes(self):\n return self.ironic_client.node.list()",
"def nodes(self): \n return [n for n in self.iternodes()]",
"def list_nodes(self, type_):\n raise NotImplementedError()",
"def displayNode(self):\n for x in self.__node:\n print(x)",
"def node_list(ctx):\n nodes = ctx.obj['controller'].get_node_list()\n nodes = [[x] for x in nodes]\n click.echo(generate_table(['NODE'], nodes, sort='NODE', plain=ctx.obj['plain']))",
"def get_node_list(self):\n return []",
"def print_list_of_nodes(self):\n\n for node in self.list_empty_nodes:\n print(\"--------------------------\")\n print(\"Node num : \"+str(node.num))\n print(\"Node distance from start point : \"+str(node.distance_from_start_point))\n if node.pere is None:\n print(\"Pas de père\")\n else:\n print(\"Num du père : \"+str(node.pere.num))",
"def nodes_individual(self):\n return self._nodes_individual",
"def test_get_hyperflex_node_list(self):\n pass",
"def get_nodes(self):\n pass",
"def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1",
"def get_node_list(self):\n return [[node] for node in self.graph.nodes]",
"def nodes(self):\n return self._node_reg",
"def get_all_nodes(self):\n return self._get_all_nodes()",
"def all_nodes(self):\n nodes = []\n for layer in self.layers:\n nodes += layer.nodes\n return nodes",
"def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )",
"def get_nodes(self) -> List[Node]:\n\t\treturn sorted(self.nodes, key=lambda x: x.name.lower())",
"def as_list(self):\n nodes = []\n node = self.first_node\n while node:\n nodes.append(node)\n node = node.next\n return nodes",
"def getNodes(self):\n return self.__allNodes",
"def nodes (self):\n return self.__nodes",
"def nodes(self):\n return self.__nodes",
"def get_nodes(self):\n return list(map(lambda x: x[0], self.__nodes))",
"def nodes(self):\r\n return (node.content for node in self.traverse())",
"def list_nodes(self):\n nodes = self.nodes\n result = []\n for i_node in self.iapi.node.list():\n if i_node.name:\n name = i_node.name\n else:\n # Sometimes Ironic does not show the names, pull them from Nova if possible.\n selected_nova_node = None\n for nova_node in nodes:\n if getattr(\n nova_node, 'OS-EXT-SRV-ATTR:hypervisor_hostname', None) == i_node.uuid:\n selected_nova_node = nova_node\n break\n if selected_nova_node:\n name = selected_nova_node.name\n else:\n name = None\n result.append(Node(i_node.uuid, name, i_node.power_state, i_node.provision_state))\n return result",
"def get_node_list(self):\n return self.node_list",
"def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes",
"def list_node_types(self):\n return list(nodelist.all_nodes.keys())",
"def nodes(self):\n return list(self.node_dict.keys())",
"def nodeItems(self):\n nodes = list()\n for item in self.items():\n if isinstance(item, NodeItem):\n nodes.append(item)\n return nodes",
"def nodes(topology):\n return topology.nodes()"
]
| [
"0.6407321",
"0.6391654",
"0.63787967",
"0.6297833",
"0.62783486",
"0.6180489",
"0.6174722",
"0.6101564",
"0.6089356",
"0.60265636",
"0.59619343",
"0.5940407",
"0.59089303",
"0.59052485",
"0.58955514",
"0.5893381",
"0.58924395",
"0.5887432",
"0.5851083",
"0.5831555",
"0.5817544",
"0.58089906",
"0.57993734",
"0.5789003",
"0.57660383",
"0.5746704",
"0.5740494",
"0.5718297",
"0.57107365",
"0.5699303"
]
| 0.7425432 | 0 |
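The traversal in this record walks a singly linked chain of PrimitiveNodes until `_data_node` stops being a PrimitiveNode, i.e. it reaches the data terminal, a plain string. A cut-down sketch of that structure; the node class here carries only the fields the walk needs:

class PrimitiveNode:
    def __init__(self, name, data_node):
        self.name = name
        self._data_node = data_node  # another PrimitiveNode, or the "data" terminal

def primitives(main_node):
    chain = [main_node]
    node = main_node._data_node
    while isinstance(node, PrimitiveNode):  # stop once the terminal string is reached
        chain.append(node)
        node = node._data_node
    return chain

tail = PrimitiveNode("StandardScaler", "data")
head = PrimitiveNode("RandomForest", tail)
assert [n.name for n in primitives(head)] == ["RandomForest", "StandardScaler"]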
Lists all terminals connected to the Individual's primitive nodes. | def terminals(self) -> List[Terminal]:
return [terminal for prim in self.primitives for terminal in prim._terminals] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def terminals(self):\n\n return self._terminals.getSlice(0)",
"def terminals(self):\n\n return self._terminals.getSlice(0)",
"def terminals(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts == 1 ]",
"def terminals(self) -> AbstractSet[Terminal]:\n return self._terminals",
"def primitives(self) -> List[PrimitiveNode]:\n primitives = [self.main_node]\n current_node = self.main_node._data_node\n while isinstance(current_node, PrimitiveNode): # i.e. not DATA_TERMINAL\n primitives.append(current_node)\n current_node = current_node._data_node\n return primitives",
"def displayNode(self):\n for x in self.__node:\n print(x)",
"def node_list(ctx):\n nodes = ctx.obj['controller'].get_node_list()\n nodes = [[x] for x in nodes]\n click.echo(generate_table(['NODE'], nodes, sort='NODE', plain=ctx.obj['plain']))",
"def list_nodes(self):\n return self.ironic_client.node.list()",
"def printall():\n print listAll()",
"def terminals(g):\n return set(g._lexical_index.keys())",
"def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1",
"def get_terminals(self):\r\n return {symbol for _, symbols in self.productions for symbol in symbols if is_terminal(symbol)}",
"def _find_terminal_nodes(graph):\n termini = []\n for node in graph.nodes:\n if graph.degree(node) == 1:\n termini.append(node)\n\n return termini",
"def list():\n rino.remote.list()",
"def terminal_diagram(self):\n lines = [[unichr(9632) for x in range(k)]\n for k in self[1]]\n lines += [[unichr(9632) for x in range(k)]\n + [unichr(9675)] for k in self[0]]\n lines.sort(reverse=True, key=len)\n for line in lines:\n print(' '.join(line))\n print('')",
"def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)",
"def terminal_types(self):\n return (self,)",
"def _MocaCtlGetNodeIDs(self):\n mc = subprocess.Popen([MOCACTL, 'showtbl', '--nodestats'],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n nodes = set()\n for line in out.splitlines():\n node = NODE_RE.search(line)\n if node is not None:\n nodes.add(int(node.group(1)))\n node_list = list(nodes)\n length = len(node_list)\n if int(self.AssociatedDeviceCount) != length:\n type(self).AssociatedDeviceCount.Set(self, length)\n return node_list",
"def __repr__(self):\n return \"Terminal('{}')\".format(self.name)",
"def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )",
"def list(self):\n return self.rpc.call(MsfRpcMethod.ConsoleList)['consoles']",
"def print_list_of_nodes(self):\n\n for node in self.list_empty_nodes:\n print(\"--------------------------\")\n print(\"Node num : \"+str(node.num))\n print(\"Node distance from start point : \"+str(node.distance_from_start_point))\n if node.pere is None:\n print(\"Pas de père\")\n else:\n print(\"Num du père : \"+str(node.pere.num))",
"def nodes(self): \n return [n for n in self.iternodes()]",
"def print(self):\n output_string = \"Printing List of Nodes.\\n\"\n print(\"Printing List of Nodes\")\n for node in self.nodes:\n if node:\n output_string += str(node)\n node.print()\n return output_string",
"def get_nonterminals(self):\n for node in self.hosttree.traverse():\n if not node.is_leaf():\n yield node",
"def list_print(self):\n node = self.cur_node # cant point to ll!\n while node:\n print(node.data)\n node = node.next",
"def print_list(self) -> None:\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next",
"def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms",
"def show_nodes(self):\n node_ids = [self.controller.node_id]\n\n if self._check_cluster():\n self.print_list(\n ('uid', 'status', 'roles'), self.controller.get_nodes(),\n lambda x: node_ids.index(x.get('uid'))\n )",
"def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')"
]
| [
"0.6630996",
"0.6630996",
"0.6356642",
"0.6237207",
"0.6134914",
"0.59288365",
"0.5890043",
"0.5884904",
"0.5793998",
"0.57869047",
"0.57813084",
"0.5745473",
"0.5714512",
"0.5654323",
"0.5578676",
"0.54320294",
"0.5428282",
"0.54231596",
"0.53973806",
"0.53786254",
"0.53716147",
"0.53713053",
"0.53691727",
"0.53630763",
"0.5330952",
"0.5321443",
"0.5298237",
"0.525564",
"0.52539396",
"0.52467847"
]
| 0.72116995 | 0 |
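Flattening terminals across the chain is a nested comprehension in the record; itertools.chain gives an equivalent form. A runnable check with stand-in nodes (SimpleNamespace substitutes for the real PrimitiveNode):

from itertools import chain
from types import SimpleNamespace

def terminals(primitive_nodes):
    # primitive_nodes: the list produced by the primitives() traversal above.
    return list(chain.from_iterable(node._terminals for node in primitive_nodes))

nodes = [SimpleNamespace(_terminals=["n_estimators=100"]),
         SimpleNamespace(_terminals=["with_mean=True", "with_std=True"])]
assert len(terminals(nodes)) == 3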
Replace the terminal at `position` by `new_terminal` in place. | def replace_terminal(self, position: int, new_terminal: Terminal) -> None:
scan_position = 0
for primitive in self.primitives:
if scan_position + len(primitive._terminals) > position:
terminal_to_be_replaced = primitive._terminals[position - scan_position]
if terminal_to_be_replaced.identifier != new_terminal.identifier:
raise ValueError(
f"New terminal does not share output type with the old."
f"Old: {terminal_to_be_replaced.identifier}"
f"New: {new_terminal.identifier}."
)
primitive._terminals[position - scan_position] = new_terminal
return
else:
scan_position += len(primitive._terminals)
if scan_position < position:
raise ValueError(
f"Position {position} is out of range with {scan_position} terminals."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_position(board: Board, position: Position, character: str) -> Board:\n board = list(board)\n \n row = board[position[0]]\n new_row = row[:position[-1]] + character + row[position[-1] + 1:]\n board[position[0]] = new_row\n\n board = tuple(board) \n\n return board",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def set_position(self, new_pos):\n self._position = new_pos",
"def move_character(self, old_y, old_x, y_pos, x_pos):\n self.map[old_y][old_x] = ' '\n self.map[y_pos][x_pos] = 'G'",
"def set_position(self, new_pos, units=\"bohr\"):\n from numpy import array\n # Convert the input to the right units.\n pos = array(new_pos)\n if _IsAngstroem(units):\n pos /= AU_to_A\n if _IsAngstroem(self):\n pos *= AU_to_A\n pos = [x for x in pos]\n\n # Insert\n if 'r' in self.store:\n self.store['r'] = pos\n else:\n self.store[self.sym] = pos\n pass",
"def setPosition(self,newPos):\n self._position = newPos",
"def position(self, position):\n self.move_to(position)",
"def setPosition(position):",
"def move_to(self, position):\n raise NotImplementedError",
"def set_pos(self, newpos : list) :\n if len(newpos) == 2 :\n self.pos = list(newpos).copy()\n else :\n raise UserWarning('wrong position passed')",
"def assign_position(self, position):\n\n self._actual_position = list(position)\n self._position = [0, 0]\n if not self._widget_assigned:\n self._align()\n self._update_widgets()\n self._widget_assigned = True",
"def update_player(self, old_y, old_x, new_y, new_x):\n self.maze[old_y][old_x] = \" \"\n self.maze[new_y][new_x] = \"m\"",
"def replace_primitive(self, position: int, new_primitive: PrimitiveNode):\n last_primitive = None\n for i, primitive_node in enumerate(self.primitives):\n if i == position:\n if primitive_node._primitive.output != new_primitive._primitive.output:\n raise ValueError(\"New primitive output type differs from old.\")\n if isinstance(primitive_node._data_node, str):\n new_primitive._data_node = primitive_node._data_node\n else:\n new_primitive._data_node = primitive_node._data_node.copy()\n break\n else:\n last_primitive = primitive_node\n\n if last_primitive is None:\n self.main_node = new_primitive\n else:\n last_primitive._data_node = new_primitive",
"def new_position_edit(self, p):\n\n DBG(\"new edit position\")\n if self.mode != 'view':\n self.edit_widget.new_text(p.b)",
"def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)",
"def set_position(self, position):\n self.set_current_position(position)",
"def clear_position(board: Board, position: Position) -> Board:\n board = list(board)\n \n row = board[position[0]]\n new_row = row[:position[-1]] + '.' + row[position[-1] + 1:]\n board[position[0]] = new_row\n\n board = tuple(board) \n return board",
"def rightreplacelistitem(self, pos, newitem):\n self._rightlist.replace(pos, newitem)",
"def set_position(self, position):\n raise NotImplementedError()",
"def update_position(position):\n pass",
"def set_position(self, position):\n self.position = tuple(position)",
"def changeTo(self, oldSymbol, newSymbol):\n for x in range(0, len(self.tape)):\n if self.tape[x] == oldSymbol:\n self.tape[x] = newSymbol",
"def configure_custom_terminal(new_path):\n lnp.userconfig['terminal'] = new_path\n lnp.userconfig.save_data()",
"def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()",
"def set_position(self, dart, position) :\r\n self.positions[self.get_embedding_dart(dart,self.positions)] = position",
"def set_character(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'G'",
"def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()",
"def set_position(self, position: int) -> bool:\n hex_position = \"%0.2X\" % (100 - position) # curtain position in reverse mode\n return self._sendcommand(POSITION_KEY + hex_position, self._retry_count)",
"def replace_character_at_index(self, index, character):\n self._game_board = self.get_game()[:index] + character + self.get_game()[index + 1:]\n return self._game_board",
"def swap(self, position):\n if position not in self.swappable_positions:\n raise ValueError(\n \"Cannot swap this position: %s, it is not swappable.\" %\n str(position))\n\n newpuz = FifeteenPuzzle(copy.deepcopy(self.numbers))\n empty_position = newpuz.get_position(0)\n swap_number = newpuz.get(position)\n\n newpuz._put(empty_position, swap_number)\n newpuz._put(position, 0)\n\n return newpuz"
]
| [
"0.59610856",
"0.57170063",
"0.56611526",
"0.5623337",
"0.53372204",
"0.5206256",
"0.5122167",
"0.5102501",
"0.50833666",
"0.5053095",
"0.50475603",
"0.504088",
"0.5039403",
"0.50336826",
"0.50032115",
"0.49785298",
"0.49696037",
"0.4960981",
"0.49289528",
"0.49156287",
"0.491166",
"0.4855478",
"0.48537576",
"0.48397714",
"0.48250052",
"0.4802754",
"0.48017365",
"0.47984868",
"0.479663",
"0.47955948"
]
| 0.7815409 | 0 |
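
A minimal usage sketch for the `replace_terminal` record above. Only the method itself comes from the record; the `Individual` instance, the `Terminal` constructor keywords, and the flattened `terminals` accessor are assumptions about the surrounding genetic-programming library.

    # Hedged sketch: swap the terminal at position 0 for one that shares its
    # identifier (output type); replace_terminal raises ValueError otherwise.
    old = individual.terminals[0]                       # assumed accessor
    replacement = Terminal(                             # assumed constructor
        value="new_value", output=old.output, identifier=old.identifier
    )
    individual.replace_terminal(0, replacement)         # edits in place

Positions index the terminals of all primitives in scan order, which is why the method walks `self.primitives` while accumulating `scan_position`.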
Replace the PrimitiveNode at `position` with `new_primitive`. | def replace_primitive(self, position: int, new_primitive: PrimitiveNode):
    last_primitive = None
    for i, primitive_node in enumerate(self.primitives):
        if i == position:
            if primitive_node._primitive.output != new_primitive._primitive.output:
                raise ValueError("New primitive output type differs from old.")
            if isinstance(primitive_node._data_node, str):
                new_primitive._data_node = primitive_node._data_node
            else:
                new_primitive._data_node = primitive_node._data_node.copy()
            break
        else:
            last_primitive = primitive_node
    if last_primitive is None:
        self.main_node = new_primitive
    else:
        last_primitive._data_node = new_primitive | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_terminal(self, position: int, new_terminal: Terminal) -> None:\n scan_position = 0\n for primitive in self.primitives:\n if scan_position + len(primitive._terminals) > position:\n terminal_to_be_replaced = primitive._terminals[position - scan_position]\n if terminal_to_be_replaced.identifier != new_terminal.identifier:\n raise ValueError(\n f\"New terminal does not share output type with the old.\"\n f\"Old: {terminal_to_be_replaced.identifier}\"\n f\"New: {new_terminal.identifier}.\"\n )\n primitive._terminals[position - scan_position] = new_terminal\n return\n else:\n scan_position += len(primitive._terminals)\n if scan_position < position:\n raise ValueError(\n f\"Position {position} is out of range with {scan_position} terminals.\"\n )",
"def add_to_primitive(self, primitive_id, attributes, indices, material, mode):\n primitive_id = self._resolve_mapping(inp=primitive_id, mapping=self.primitives_map)\n\n primitive = self.primitives[primitive_id]\n\n properties_key = [\"attributes\", \"indices\", \"material\", \"mode\"]\n properties_val = [attributes, indices, material, mode]\n for key, val in zip(properties_key, properties_val):\n if val is not None:\n primitive[key] = self._resolve_mapping(inp=val, mapping=self.accessors_map)\n\n return primitive_id",
"def create_primitive(self, name, attributes=None, indices=None, material=None, mode=None):\n new_primitive = self._build_primitive(attributes=attributes,\n indices=indices,\n material=self._resolve_mapping(inp=material,\n mapping=self.materials_map),\n mode=mode)\n\n self.primitives.append(new_primitive)\n\n self.primitives_map[name] = self._last_index(self.primitives)\n\n return self._last_index(self.primitives)",
"def test_mut_replace_primitive_len_1(self):\n ind1 = self.individuals[self.ind_strings[2]]\n self._test_mutation(ind1, mut_replace_primitive, self._mut_replace_primitive_is_applied)",
"def test_mut_replace_primitive_len_2(self):\n ind1 = self.individuals[self.ind_strings[1]]\n self._test_mutation(ind1, mut_replace_primitive, self._mut_replace_primitive_is_applied)",
"def replace(self,object,newObject):\n if object in self.cell.objects:\n objIndex = self.cell.objects.index(object)\n self.cell.objects[objIndex] = newObject\n else:\n objIndex = self.cell.tempObjects.index(object)\n self.cell.tempObjects[objIndex] = newObject\n self.cell.setChanged()",
"def replace_number(self, old, new):\r\n self.numbers[old] = new",
"def replace(self, newe):\n if not self.p: return\n p = self.p\n newe.p = p\n if isinstance(p, Expr):\n if p.l == self:\n p.l = newe\n elif p.r == self:\n p.r = newe\n elif isinstance(p, Paren):\n if p.c == self:\n p.c = newe\n else:\n raise Exception(\"replace() not implemented for %s of type %s\" % (self, type(self)))",
"def obj_make_compatible(self, primitive, target_version):\n target_version = utils.convert_version_to_tuple(target_version)",
"def replace_node(self, node, new_nodes):\n parent = node.parent\n position = parent.childNodes.index(node)\n parent.removeChild(node)\n\n for n in new_nodes:\n parent.insertChild(position, n)\n position += 1",
"def _mut_replace_primitive_is_applied(self, original, mutated):\n primitives_original = [el for el in original if isinstance(el, gp.Primitive)]\n primitives_mutated = [el for el in mutated if isinstance(el, gp.Primitive)]\n if len(primitives_original) != len(primitives_mutated):\n return (False, \"Number of primitives should be unchanged, was {} is {}.\"\n .format(len(primitives_original), len(primitives_mutated)))\n\n replaced_primitives = [p1 for p1, p2 in zip(primitives_original, primitives_mutated) if p1.name != p2.name]\n if len(replaced_primitives) != 1:\n return (False, \"Expected 1 replaced Primitive, found {}.\".format(len(replaced_primitives)))\n\n return (True, None)",
"def change_object(self, new_object):\n raise NotImplementedError",
"def place_new_piece(self):\n new_piece = PieceFactory.get_piece()\n new_piece = TransformPiece.transform(new_piece, PieceFactory.get_start_point(self.size, self.current_direction))\n self.active_piece = new_piece\n value = PieceFactory.get_value()\n for cell in self.active_piece:\n self.set_cell_value(cell, value)\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()",
"def _replace(self, p, e):\n node = self._validate_position(p)\n old = node.element\n node.element = e\n return old",
"def retype(self, obj, new_type):\n\n if new_type not in self._object_types:\n raise ValueError('Parent type %s not registered in database' % new_type)\n\n # Reload and force pickled attributes into the dict.\n try:\n attrs = dict(self.get(obj))\n except TypeError:\n raise ValueError('Object (%s, %s) is not found in database' % (obj['type'], obj['id']))\n\n parent = attrs.get('parent')\n # Remove all attributes that aren't also in the destination type. Also\n # remove type, id, and parent attrs, which get regenerated when we add().\n for attr_name in list(attrs.keys()):\n # TODO: check src and dst attr types and try to coerce, and if\n # not possible, raise an exception.\n if attr_name not in self._object_types[new_type][1] or attr_name in ('type', 'id', 'parent'):\n del attrs[attr_name]\n\n new_obj = self.add(new_type, parent, **attrs)\n # Reparent all current children to the new id.\n for child in self.query(parent=obj):\n # TODO: if this raises, delete new_obj (to rollback) and reraise.\n self.reparent(child, new_obj)\n\n self.delete(obj)\n return new_obj",
"def set(self, value):\n assert (not is_sequence_like(value)) and (not is_dict_like(value)), 'the value must be an atomic primitive'\n token_index = self._value_token_index()\n self._tokens[token_index] = py2toml.create_primitive_token(value)",
"def replace(name, newobject):",
"def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError",
"def cell_replace_node(self,c,n_old,n_new):\n for ni in range(self.max_sides):\n if self.cells['nodes'][c,ni] == n_old:\n self.cells['nodes'][c,ni] = n_new\n if self._node_to_cells is not None:\n self._node_to_cells[n_old].remove(c)\n self._node_to_cells[n_new].append(c)",
"def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)",
"def replace_buffer(new_data, offset, datatype):\n offset = offset - 1\n\n if offset < 0:\n # If our buffer was a zero buffer, we don't need to bother changing.\n return\n\n global buffer\n buffer_size = len(new_data.flatten()) * dtype_size(datatype)\n (buffer_size, new_data) = align(buffer_size, new_data)\n\n # print(offset)\n buffer[offset] = new_data",
"def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node",
"def change_reference(position: Position, new_reference: Position, reference):\n\n refs = {\"up\": PositionService._change_up,\n \"down\": PositionService._change_down,\n \"left\": PositionService._change_left,\n \"right\": PositionService._change_right}\n\n if reference not in refs or (isinstance(position, Position) and isinstance(new_reference, Position)):\n raise TypeError(\"A serious error has occurred...\")",
"def _ReplaceCompound(self, from_id, to_id):\n if from_id == to_id:\n return\n \n # set the coefficient of the original compound to 0\n i = self._FindCompoundIndex(from_id)\n if i is None:\n return\n how_many = self.reactants[i].coeff\n self.reactants[i].coeff = 0\n\n # create a new compound with the new kegg_id and the same coefficient\n # or add the number to the coefficient if it already is a reactant\n j = self._FindCompoundIndex(to_id)\n if j is None:\n self.reactants[i] = CompoundWithCoeff.FromId(how_many, to_id)\n else:\n self.reactants[j].coeff += how_many\n self._Dedup()\n\n # clear the cache since the reaction has changed\n self._catalyzing_enzymes = None",
"def replace_blocks_hull(self, new_hull_type, hull_type=None):\n replace = Replace(self.smd3.get_block_list())\n replace.replace_hull(new_hull_type, hull_type)\n self.header.update(self.smd3)",
"def replace(self, item):\n return heapq.heapreplace(self.heap, item)",
"def set_position(self, new_pos):\n self._position = new_pos",
"def create_prim(stage: Usd.Stage, path: str, prim_type: str, \n translation: Optional[Tuple[float, float, float]] = None,\n rotation: Optional[Tuple[float, float, float]] = None,\n scale: Optional[Tuple[float, float, float]] = None,\n ref: Optional[str] = None,\n semantic_label: Optional[str] = None,\n attributes: Optional[dict] = {}) -> Usd.Prim:\n # Define prim in the input stage\n prim = stage.DefinePrim(path, prim_type)\n # Apply attributes from the input dictionary\n for k, v in attributes.items():\n prim.GetAttribute(k).Set(v)\n # Load reference USD file.\n if ref:\n prim.GetReferences().AddReference(ref)\n # Apply semantic label to the prim\n if semantic_label:\n sem = Semantics.SemanticsAPI.Apply(prim, \"Semantics\")\n sem.CreateSemanticTypeAttr()\n sem.CreateSemanticDataAttr()\n sem.GetSemanticTypeAttr().Set(\"class\")\n sem.GetSemanticDataAttr().Set(semantic_label)\n # Apply XFORM related properties to the prim\n xform_api = UsdGeom.XformCommonAPI(prim)\n # Apply rotation in XYZ coordinates in world frame\n if rotation:\n xform_api.SetRotate(rotation, UsdGeom.XformCommonAPI.RotationOrderXYZ)\n # Apply scale to the prim\n if scale:\n xform_api.SetScale(scale)\n # Apply transform (x, y, z) to the prim in world frame\n if translation:\n xform_api.SetTranslate(translation)\n\n return prim",
"def ReplaceElement(self, position, element):\n self.__context.builder.DocumentElementReplace(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n position, element)",
"def _place_new_obj(self, (screen_width, screen_height)):\n old_tree = self.objects.get()\n new_x = (-old_tree.position[0]) + old_tree.max_width*2 + screen_width\n another_tree = Grass((new_x, screen_height), self.width, self.height)\n self.objects.put(another_tree)"
]
| [
"0.57029593",
"0.55897343",
"0.51725495",
"0.50479764",
"0.5024666",
"0.49654397",
"0.48944867",
"0.48825198",
"0.4826908",
"0.48052645",
"0.47664177",
"0.47615296",
"0.4702991",
"0.46913335",
"0.46813706",
"0.46742797",
"0.46691516",
"0.4666951",
"0.4660286",
"0.46550384",
"0.46450067",
"0.4546846",
"0.45371312",
"0.4529568",
"0.45167822",
"0.44948038",
"0.447445",
"0.44685373",
"0.44616595",
"0.4459768"
]
| 0.8847874 | 0 |
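
A matching usage sketch for the `replace_primitive` record above. The call shape of `PrimitiveNode.from_string` is taken from the `from_string` record that follows; `individual`, `primitive_set`, and the expression string are illustrative assumptions.

    # Hedged sketch: replace the root primitive. Position 0 is the first node
    # of self.primitives, i.e. main_node when the chain is walked from the top.
    new_root = PrimitiveNode.from_string("GaussianNB(data)", primitive_set, True)
    individual.replace_primitive(0, new_root)
    # The old node's _data_node is carried over (copied when it is a node, kept
    # as-is when it is a string), so the rest of the chain stays intact; a
    # ValueError is raised when the two primitives' output types differ.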
Construct an Individual from its `pipeline_str` representation. | def from_string(
    cls,
    string: str,
    primitive_set: dict,
    to_pipeline: Optional[Callable] = None,
    strict: bool = True,
) -> "Individual":
    expression = PrimitiveNode.from_string(string, primitive_set, strict)
    return cls(expression, to_pipeline=to_pipeline) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_str(cls, string):",
"def on_pipeline_from_string(code: str) -> PipelineInspectorBuilder:\n return PipelineInspectorBuilder(python_code=code)",
"def from_str(cls, s):\n raise NotImplementedError",
"def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)",
"def from_string(string):\n return Output('', magic=string)",
"def from_str(cls, s: str):\n instr, outstr = s.split('→')\n return ExperimentSetting(in_state=TensorProductState.from_str(instr),\n observable=PauliTerm.from_compact_str(outstr))",
"def from_string(cls, dlstr):\n\n smooth = None\n\n try:\n tokens = dlstr.lower().split()\n\n if tokens[0] != ExpandedEnsemble.key:\n raise ValueError()\n\n eta0 = float(tokens[1])\n c_upd = float(tokens[2])\n n_upd = int(tokens[3])\n\n try:\n n_itr = int(tokens[4])\n i_beg = int(tokens[5])\n i_end = int(tokens[6])\n omega = float(tokens[7])\n smooth = BiasSmoother(n_itr, i_beg, i_end, omega)\n\n except IndexError:\n # assume optional arguments not present\n pass\n\n except (IndexError, ValueError):\n msg = \"Expect 'ee eta0 c_upd u_upd []'; got {!r}\".format(dlstr)\n raise ValueError(msg)\n\n return ExpandedEnsemble(eta0, c_upd, n_upd, smooth)",
"def build_from_string(self, obj):\n if self.string_type is unicode and not isinstance(obj, unicode):\n obj = str(obj).decode('utf-8')\n if self.string_type is str and not isinstance(obj, str):\n obj = unicode(obj).encode('utf-8')\n return self.art_type(obj.splitlines())",
"def from_str(cls, value_str):\n return cls(value_str, cls._normalize_html(value_str))",
"def from_string(cls, dlstr):\n\n NotImplementedError(\"Should be implemented by subclass\")",
"def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")",
"def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' % (cls.__name__,))",
"def fromStr(cls, s):\n assert isinstance(s, str), 'incorrect type of arg s: should be type str, is type {}'.format(type(s))\n s = [ int(n) for n in s.split('.') ]\n return cls(*s)",
"def fromstring(text, schema=None):\n if schema:\n parser = objectify.makeparser(schema=schema.schema)\n return objectify.fromstring(text, parser=parser)\n else:\n return objectify.fromstring(text)",
"def create_from_arg_string(cls, arg_string):\n return cls()",
"def __init__(self, input_str):\n raise NotImplementedError(\"This method needs to be implemented.\")",
"def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)",
"def from_str(s: str) -> \"Lineage\":\n match = LINEAGE_REGEX.search(s)\n if not match:\n raise InvalidLineageString(\n f\"Lineage string {s} is not in the expected format.\"\n )\n major = match.group(\"major\")\n minor = match.group(\"minor\") or None\n return Lineage(major=major, minor=minor)",
"def from_str(cls, ref_str: str):\n project = \"\"\n if \"/\" in ref_str:\n project, ref_str = ref_str.split(\"/\")\n\n return cls(project, ref_str)",
"def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object1 = str[start:end].decode('utf-8')\n else:\n self.object1 = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object2 = str[start:end].decode('utf-8')\n else:\n self.object2 = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.penetration_distance, _x.operation,) = _struct_di.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def __init__(self,\r\n name: str,\r\n pipeline: PipelineBase,\r\n is_deterministic: bool = True):\r\n self.name = name\r\n self.is_deterministic = is_deterministic\r\n self.pipeline = pipeline",
"def fromstring(cls, str_pkt):\n xml_pkt = lxml.objectify.fromstring(str_pkt)\n layers = [Layer(proto) for proto in xml_pkt.proto]\n geninfo, frame, layers = layers[0], layers[1], layers[2:]\n frame.raw_mode = True\n return cls(layers=layers,\n length=int(geninfo.get_field_value('len')),\n captured_length=int(geninfo.get_field_value('caplen')),\n sniff_time=geninfo.get_field_value('timestamp', raw=True),\n interface_captured=frame.get_field_value('interface_id'))",
"def from_string(cls, iri_string, encoding='utf-8'):\n iri_string = compat.to_str(iri_string, encoding)\n\n split_iri = misc.IRI_MATCHER.match(iri_string).groupdict()\n return cls(\n split_iri['scheme'], split_iri['authority'],\n normalizers.encode_component(split_iri['path'], encoding),\n normalizers.encode_component(split_iri['query'], encoding),\n normalizers.encode_component(split_iri['fragment'], encoding),\n encoding,\n )",
"def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.group = str[start:end].decode('utf-8')\n else:\n self.group = str[start:end]\n _x = self\n start = end\n end += 2\n (_x.rand_start, _x.current_start,) = _get_struct_2B().unpack(str[start:end])\n self.rand_start = bool(self.rand_start)\n self.current_start = bool(self.current_start)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.start_pos = struct.unpack(pattern, str[start:end])\n start = end\n end += 1\n (self.rand_target,) = _get_struct_B().unpack(str[start:end])\n self.rand_target = bool(self.rand_target)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.target_pos = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 4\n (_x.execute, _x.wait, _x.ret_plan, _x.ret_fps,) = _get_struct_3Bb().unpack(str[start:end])\n self.execute = bool(self.execute)\n self.wait = bool(self.wait)\n self.ret_plan = bool(self.ret_plan)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def from_string(string, _or=''):\n if _or:\n and_or = 'or'\n else:\n and_or = ''\n return Input(string, and_or=and_or)",
"def from_string(string):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n return json.loads(string)",
"def from_str(cls, line) -> \"VersionStructure\":\n major, minor, patch = [int(item) for item in line.split(\".\")]\n return cls(major=major, minor=minor, patch=patch)",
"def construct_from_string(cls, string):\n # Remove fletcher specific naming from the arrow type string.\n if string.startswith(\"fletcher[\"):\n string = string[9:-1]\n\n if string == \"list<item: string>\":\n return cls(pa.list_(pa.string()))\n\n try:\n type_for_alias = pa.type_for_alias(string)\n except (ValueError, KeyError):\n # pandas API expects a TypeError\n raise TypeError(string)\n\n return cls(type_for_alias)",
"def from_string(\n cls: Type[_CromwellWorkflowLabel], workflow_label: str\n ) -> _CromwellWorkflowLabel:\n count_equals = workflow_label.count(\"=\")\n count_escaped_equals = workflow_label.count(\"\\\\=\")\n\n if count_equals - count_escaped_equals == 0:\n return cls(cls.CAPER_STR_LABEL, workflow_label)\n\n if count_equals - count_escaped_equals != 1:\n raise ValueError(\n \"Found more than one unescaped `=` in key=value pair, must only '\"\n \"specify one so parsing is not ambiguous\"\n )\n\n for i, char in enumerate(workflow_label):\n if char == \"=\":\n if workflow_label[i - 1] != \"\\\\\":\n key, value = workflow_label[0:i], workflow_label[i + 1 :]\n return cls(key, value)\n\n # Can skip coverage here, we know the loop above always executes on a string\n # with one non-escaped equals sign in it\n raise ValueError(\"Could not detect key-value pair\") # pragma: no cover",
"def from_string(string):\n return Sentence(string.split(\" \"))"
]
| [
"0.61646575",
"0.59634817",
"0.5879963",
"0.5879679",
"0.574354",
"0.56137294",
"0.5543158",
"0.5537403",
"0.5508447",
"0.5491343",
"0.5461753",
"0.5400685",
"0.5326605",
"0.52744126",
"0.525887",
"0.52570957",
"0.5246513",
"0.52418774",
"0.5240452",
"0.52136445",
"0.51783484",
"0.5170267",
"0.51398164",
"0.51370454",
"0.510177",
"0.5084098",
"0.50648737",
"0.50607216",
"0.50263584",
"0.50223243"
]
| 0.7422873 | 0 |
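
A round-trip sketch for the `Individual.from_string` record above. The expression string, `primitive_set`, and the `compile_pipeline` callable are assumptions; the record only fixes the classmethod's signature.

    # Hedged sketch: parse an expression string into an Individual and attach
    # a callable that can later turn the expression tree into a pipeline.
    ind = Individual.from_string(
        "GaussianNB(data)",
        primitive_set,
        to_pipeline=compile_pipeline,  # Optional[Callable]; may be omitted
    )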
Generate the C API reference as RST, returned as a dict of section strings keyed by namespace | def gen_capi(args):
    if not args.header:
        return {}
    cmd = ["ctags", "-x", "--c-kinds=fpsgx", args.header]
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    if process.returncode:
        return {}
    out = out.decode()  # communicate() returns bytes; decode before splitting
    titles = {
        "nvm_geo": "Geometry",
        "nvm_buf": "Buffer Allocation",
        "nvm_dev": "Device Management",
        "nvm_addr": "Addressing",
        "nvm_cmd": "Raw Commands",
        "nvm_vblk": "Virtual Block",
        "nvm_bbt": "Bad-Block-Table"
    }
    docs = {}
    lib = {}
    for line in out.split("\n"):
        parts = (" ".join(line.split())).split(" ")[:2]
        if len(parts) < 2:
            continue
        name, kind = parts
        ns = "_".join(name.split("_")[:2])
        if ns not in lib:
            lib[ns] = {}
        if kind not in lib[ns]:
            lib[ns][kind] = []
        lib[ns][kind].append(name)
    for ns in lib:
        if "prototype" in lib[ns]:
            ordering = [
                "bbt_get", "bbt_set", "bbt_mark", "bbt_flush",
                "addr_erase", "addr_read", "addr_write", "addr_check",
                "addr_.*2",
                "vblk_erase", "vblk_p?read", "vblk_p?write", "vblk_pad",
                "lba_p?read", "lba_p?write",
                "_alloc", "_fill", "_free", "_pr",
                "_get_", "_set_"
            ]
            ordered = []
            for order in ordering:
                for func in lib[ns]["prototype"]:
                    if re.search(order, func):
                        if func not in ordered:
                            ordered.append(func)
            lib[ns]["prototype"] = list(
                set(lib[ns]["prototype"]) -
                set(ordered)
            ) + ordered
        title = "%s - %s" % (ns, titles[ns]) if ns in titles else ns
        rst = "\n".join([
            ".. _sec-capi-%s:" % ns, "",
            title,
            "=" * len(title),
            "", ""
        ])
        if "typedefs" in lib[ns]:
            for typedef in lib[ns]["typedefs"]:
                rst += "\n".join([
                    typedef,
                    "-" * len(typedef), "",
                    ".. doxygentypedef:: %s" % typedef,
                    "", ""
                ])
        for mangler in ["struct", "externvar"]:
            if mangler in lib[ns]:
                for struct in lib[ns][mangler]:
                    rst += "\n".join([
                        struct,
                        "-" * len(struct), "",
                        ".. doxygenstruct:: %s" % struct,
                        " :members:",
                        "", ""
                    ])
        if "enum" in lib[ns]:
            for enum in lib[ns]["enum"]:
                rst += "\n".join([
                    enum,
                    "-" * len(enum), "",
                    ".. doxygenenum:: %s" % enum,
                    "", ""
                ])
        if "prototype" in lib[ns]:
            for func in lib[ns]["prototype"]:
                rst += "\n".join([
                    func,
                    "-" * len(func), "",
                    ".. doxygenfunction:: %s" % func,
                    "", ""
                ])
        docs[ns] = rst
    return docs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DocString():\n return",
"def gen_rst(txt):\n res = \"\"\n title_block = get_title_block(txt)\n title_rst = gen_title_rst(title_block)\n res += title_rst\n # add a blank line just to be sure\n res += \"\\n\"\n fun_blocks = get_fun_blocks(txt)\n for (name, fun_block) in fun_blocks:\n fun_rst = gen_fun_rst(name, fun_block)\n res += fun_rst\n return res",
"def gen_fun_rst(name, txt):\n (desc, params, example) = parse_fun_block(txt)\n directive = gen_rst_directive(name, params)\n example_rst = gen_example_rst(example)\n res = \"\"\"\n{directive}\n{desc}\n{example}\n\n\"\"\".format(name=name,\n directive=directive,\n desc=indent(desc, 2),\n example=example_rst)\n return res",
"def gen_header(cmd_list):\n\ts = \"/* Warning: This file is automatically generated. Do not modify. */\\n\"\n\ts += \"#ifndef COMMGEN_H\\n\"\n\ts += \"#define COMMGEN_H\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"extern \\\"C\\\" {\\n\"\n\ts += \"#endif\\n\\n\"\n\ts += \"#include <stdint.h>\\n\\n\"\n\ts += gen_struct_def(cmd_list)\n\ts += \"/* To avoid the volatile qualifier being a pain in the ass, the main loop\\n\"\n\ts += \" * accesses the DataReal struct through this pointer. */\\n\"\n\ts += \"extern volatile struct comm_data_t *Data;\\n\\n\"\n\ts += \"/* Parse a packet, update the struct, and send a reply. */\\n\"\n\ts += \"void parse_packet(uint8_t *buf, uint16_t count);\\n\\n\"\t\n\tfor c in cmd_list:\n\t\ts += gen_send_proto(c) + \"\\n\"\n\t\ts + gen_parse_proto(c) + \"\\n\"\n\ts += gen_packing_protos()\n\ts += gen_build_str_dec()\n\t#s += \"void send_packet(uint8_t *data, uint16_t count);\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"}\\n\"\n\ts += \"#endif\\n\\n\"\t\n\ts += \"#endif\\n\"\n\treturn s",
"def gen_csource(protocol):\n\tdef format_default(reg):\n\t\t\"\"\"Given a reg, return its default value formatted as a string for inclusion in\n\t\t a C source file.\"\"\"\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"\n\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <avr/interrupt.h>\n#include <util/atomic.h>\n#include \"protocol.h\"\n#include \"spi.h\"\n\n\"\"\"\n\ts += \"volatile struct comm_data_t Data = {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t.\" + r.name + \" = \" + format_default(r) + \", /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\ts += \"\\n\"\n\t\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void){ /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"\"\"\\t%s v;\n\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tv = Data.%s;\n\t}\n\treturn v;\n}\n\"\"\"%(r.size, r.name)\n\t\ts += \"void set_%s(%s v){ /* %s */\\n\"%(r.name, r.size, r.desc)\n\t\ts += \"\"\"\\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tData.%s = v;\n\t}\n}\n\n\"\"\"%(r.name)\n\ts += \"\"\"ISR(SPI0_STC_vect){\n\tuint8_t reg_num = SPDR0;\n\tswitch(reg_num){\n\"\"\"\n\t\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"\\t\\tcase % 2d: /* Write %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_rx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\t\tif r.read:\n\t\t\ts += \"\\t\\tcase 0x80 + % 2d: /* Read %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_tx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\ts += \"\"\"\t}\n\n\t/* Clear SPIF flag */\n\treg_num = SPSR0;\n\treg_num = SPDR0;\n}\n\"\"\"\t\n\treturn s",
"def create_cfile_head(self):\n head = \"\"\"#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n#include \"../init_array_lib/init_dyn_array.h\"\n#include \"../pips_lib/define_script.h\"\n\n\nint main(int argc, const char* argv[])\n{\n srand(time(NULL));\n \"\"\"\n\n self.append_text_to_file(str(head))",
"def fortran_c_wrapper(self) -> str:\n return ''",
"def gen_build_str_def():\n\treturn \"\"",
"def format(self):\n include = ' '.join(['-I%s' % s for s in self.includes])\n linklib = ' '.join(['-L%s' % s for s in self.linklibs])\n buff = ['## generated by build.py %s' % time.ctime(),\n 'COMPILER_DIR = %s' % self.compilerdir,\n 'SHAREDLIB = %s' % self.sharedlib,\n 'ARCH_TARGET = %s' % self.arch_target,\n 'LINK_LIB = %s' % linklib,\n 'INCLUDE = %s' % include,\n 'CONDA_COMPAT = %s' % self.conda_compat,\n 'INSTALL_PREFIX = %s' % self.install_prefix,\n '####', '']\n return '\\n'.join(buff).replace('\\\\', '/')",
"def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s",
"def main_docstring():",
"def gen_rst_directive(fun_name, params):\n res = \".. cmake:function:: %s\" % fun_name\n res += \"(\"\n sig_params = [decorate(name, type) for(type, name, doc) in params]\n sig_params = [x for x in sig_params if x is not None]\n sig_params = \" \".join(sig_params)\n res += sig_params\n res += \")\"\n res += \"\\n\"\n res += \"\\n\"\n for param in params:\n (type, name, doc) = param\n if type == \"example\":\n # \\example is handled by gen_example_rst\n continue\n doc = doc.replace(\"\\n\", \" \")\n to_add = \":arg %s: %s\" % (name, doc)\n res += indent(to_add, 2)\n res += \"\\n\"\n return res",
"def gen_python(protocol):\n\ts = \"\"\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"def set_%s(value): #%s\\n\"%(r.name, r.desc)\n\t\t\ts += \"\\twrite_reg_raw(%d, \\\"%s\\\", value)\\n\\n\"%(r.number, r.size)\n\t\tif r.read:\n\t\t\ts += \"def get_%s(): #%s\\n\"%(r.name, r.desc)\n\t\t\ts += \"\\treturn read_reg_raw(%d, \\\"%s\\\")\\n\\n\"%(r.number, r.size)\n\treturn s",
"def api(self) -> str:",
"def generate(self):\n return \"\"",
"def fortran_c_wrapper(self) -> str:\n result = banner('//')\n result += self._fc_includes()\n result += self._fc_using_statements()\n result += self._fc_function_definitions()\n return result",
"def gen_example_rst(example):\n if not example:\n return \"\"\n res = \"\"\"**Example**\n\n.. literalinclude:: /samples/{example}/CMakeLists.txt\n :language: cmake\n\n\"\"\"\n return res.format(example=example)",
"def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()",
"def writeCode(doc):\n\n comp_template = \"model.addCompartment(vol=%s, comp_id='%s');\"\n species_template = \"model.addSpecies(species_id='%s', amt=%s, comp='%s');\"\n param_template = \"model.addParameter(param_id='%s', val=%s, units='%s');\"\n rxn_template = (\n \"model.addReaction(reactants=%s, products=%s, \"\n \"expression='%s', local_params=%s, rxn_id='%s');\"\n )\n event_template = (\n \"model.addEvent(trigger='%s', assignments=%s, persistent=%s, \"\n \"initial_value=%s, priority=%s, delay=%s, event_id='%s');\"\n )\n event_defaults = [True, False, \"0\", 0]\n assignrule_template = \"model.addAssignmentRule(var='%s', math='%s');\"\n raterule_template = \"model.addRateRule(var='%s', math='%s', rr_id='%s');\"\n initassign_template = \"model.addInitialAssignment(symbol='%s', math='%s')\"\n init_template = (\n \"import simplesbml\\nmodel = simplesbml.sbmlModel(time_units='%s', \"\n \"extent_units='%s', sub_units='%s', level=%s, version=%s);\"\n )\n init_defaults = [\"min\", \"Molar\", \"Molar\", 3, 1]\n command_list = []\n\n if doc.getLevel() == 1:\n warnings.warn(\"Warning: SimpleSBML does not support SBML Level 1.\")\n\n props = libsbml.ConversionProperties()\n props.addOption(\"flatten comp\", True)\n result = doc.convert(props)\n if result != libsbml.LIBSBML_OPERATION_SUCCESS:\n raise SystemExit(\"Conversion failed: (\" + str(result) + \")\")\n\n mod = doc.getModel()\n comps = mod.getListOfCompartments()\n species = mod.getListOfSpecies()\n params = mod.getListOfParameters()\n rxns = mod.getListOfReactions()\n events = mod.getListOfEvents()\n rules = mod.getListOfRules()\n print(\"rules\", rules)\n inits = []\n if doc.getLevel() == 3 or (doc.getLevel() == 2 and doc.getVersion() > 1):\n inits = mod.getListOfInitialAssignments()\n\n timeUnits = \"min\" # second\n substanceUnits = \"Molar\" # mole\n extentUnits = \"Molar\" # mole\n if doc.getLevel() == 3:\n timeUnits = mod.getTimeUnits()\n extentUnits = mod.getExtentUnits()\n substanceUnits = mod.getSubstanceUnits()\n level = mod.getLevel()\n version = mod.getVersion()\n init_list = [timeUnits, extentUnits, substanceUnits, level, version]\n for i in range(0, 5):\n if init_list[i] == init_defaults[i]:\n init_list[i] = \"del\"\n\n command_list.append(\n init_template\n % (init_list[0], init_list[1], init_list[2], init_list[3], init_list[4])\n )\n\n for comp in comps:\n if comp.getId() != \"c1\":\n if comp.getId()[0] == \"c\" and comp.getId()[1 : len(comp.getId())].isdigit():\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", \"del\"))\n else:\n command_list.append(comp_template % (comp.getSize(), \"del\"))\n else:\n if comp.getSize() == 1e-15:\n command_list.append(comp_template % (\"del\", comp.getId()))\n else:\n command_list.append(comp_template % (comp.getSize(), comp.getId()))\n\n for s in species:\n conc = s.getInitialConcentration()\n amt = s.getInitialAmount()\n sid = s.getId()\n if s.getCompartment() == \"c1\":\n comp = \"del\"\n else:\n comp = s.getCompartment()\n bc = s.getBoundaryCondition()\n if bc:\n sid = \"$\" + sid\n if isnan(conc) or amt > conc:\n command_list.append(species_template % (sid, str(amt), comp))\n else:\n command_list.append(species_template % (\"[\" + sid + \"]\", str(conc), comp))\n\n for p in params:\n val = p.getValue()\n pid = p.getId()\n if p.getUnits() == \"per_second\":\n units = \"del\"\n else:\n units = p.getUnits()\n isDelay = pid.find(\"Delay\")\n if isDelay == -1:\n command_list.append(param_template % (pid, str(val), str(units)))\n\n for v in rxns:\n vid = 
v.getId()\n if vid[0] == \"v\" and vid[1 : len(vid)].isdigit():\n vid = \"del\"\n reactants = []\n for r in v.getListOfReactants():\n reactants.append(\n (str(r.getStoichiometry()) + \" \" + r.getSpecies()).replace(\"1.0 \", \"\")\n )\n products = []\n for p in v.getListOfProducts():\n products.append(\n (str(p.getStoichiometry()) + \" \" + p.getSpecies()).replace(\"1.0 \", \"\")\n )\n expr = libsbml.formulaToString(v.getKineticLaw().getMath())\n local_params = {}\n local_ids = []\n local_values = []\n for k in v.getKineticLaw().getListOfParameters():\n local_ids.append(k.getId())\n local_values.append(k.getValue())\n local_params = dict(zip(local_ids, local_values))\n if len(local_params) == 0:\n local_params = \"del\"\n command_list.append(\n rxn_template % (str(reactants), str(products), expr, str(local_params), vid)\n )\n\n for e in events:\n persistent = True\n initialValue = False\n priority = \"0\"\n eid = e.getId()\n if len(eid) == 0 or (eid[0] == \"e\" and eid[1 : len(eid)].isdigit()):\n eid = \"del\"\n if doc.getLevel() == 3:\n persistent = e.getTrigger().getPersistent()\n initialValue = e.getTrigger().getInitialValue()\n priority = e.getPriority()\n if isinstance(priority, libsbml.Priority):\n priority = libsbml.formulaToL3String(priority.getMath())\n else:\n priority = \"0\"\n tri = libsbml.formulaToL3String(e.getTrigger().getMath())\n did = e.getDelay()\n if isinstance(did, libsbml.Delay):\n delay = libsbml.formulaToL3String(did.getMath())\n else:\n delay = \"0\"\n assigns = e.getListOfEventAssignments()\n var = []\n values = []\n for assign in assigns:\n var.append(assign.getVariable())\n values.append(libsbml.formulaToL3String(assign.getMath()))\n assigns = dict(zip(var, values))\n\n event_list = [persistent, initialValue, priority, delay]\n for i in range(0, 4):\n if event_list[i] == event_defaults[i]:\n event_list[i] = \"del\"\n\n command_list.append(\n event_template\n % (\n tri,\n str(assigns),\n event_list[0],\n event_list[1],\n event_list[2],\n event_list[3],\n eid,\n )\n )\n\n for r in rules:\n rid = r.getId()\n print(\"rid\")\n # if rid[0] == 'Rate' and rid[1:len(rid)].isdigit():\n # rid = 'del'\n sym = r.getVariable()\n math = libsbml.formulaToL3String(r.getMath())\n if r.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE:\n command_list.append(assignrule_template % (sym, math))\n elif r.getTypeCode() == libsbml.SBML_RATE_RULE:\n command_list.append(raterule_template % (sym, math, rid))\n else:\n pass\n\n for i in inits:\n sym = i.getSymbol()\n math = libsbml.formulaToL3String(i.getMath())\n command_list.append(initassign_template % (sym, math))\n\n commands = \"\\n\".join(command_list)\n commands = sub(r\"\\w+='?del'?(?=[,)])\", \"\", commands)\n commands = sub(r\"\\((, )+\", \"(\", commands)\n commands = sub(r\"(, )+\\)\", \")\", commands)\n commands = sub(\"(, )+\", \", \", commands)\n return commands",
"def to_str(self) -> str:\n from axonius_api_client.tools import json_dump\n\n value: str = f\"\"\"\n## {self.name}\n\nGUI Path:\n\n{self.gui_path}\n\n### Description for {self.name} \n\n{self.description.strip()}\n\n### Description of examples for {self.name} \n\n{self.example_description.strip()}\n\n### Example of attribute in REST API Saved Query object for {self.name} \n\nPath to attribute in REST API Saved Query object:\n{self.saved_query_path}\n\n```json\n{json_dump(self.rest_api_saved_query.value)}\n```\n\n### Example of attribute in REST API get assets object for {self.name} \n\nAttribute in REST API get assets object:\n{self.rest_api_get_assets.arg}\n\n```json\n{json_dump(to_json_api(self.rest_api_get_assets.value, \"entity_request_schema\"))}\n```\n\n### Example of argument in API Client get assets method for {self.name} \n\nArgument to API Client get assets method:\n{self.api_client_get_assets.arg}\n\n```python\nimport axonius_api_client as axonapi\nconnect_args: dict = axonapi.get_env_connect()\nclient: axonapi.Connect = axonapi.Connect(**connect_args)\n\n{self.api_client_get_assets.value.strip()}\n\n# or client.users.get(...)\n# or client.vulnerabilities.get(...)\n```\n\n### Example of argument in axonshell get assets command for {self.name} \n\nArgument in axonshell get assets command:\n{self.axonshell_get_assets.arg}\n\n```console\n{self.axonshell_get_assets.value.strip()}\n```\n\"\"\"\n return value",
"def gen_build_str_dec():\n\t#Get name of person building firmware\n\t#git config --get-all user.name\n\t#Get repo revision\n\t#git log | head -1 | cut -d \" \" -f 2\n\t#Get branch\n\t#git branch | grep \"\\*\" | cut -d \" \" -f 2\n\t#Get modified status\n\t#Date, time, gcc version (__VERSION__)\n\ts = \"Miniboard Firmware rev \"\n\treturn \"\"",
"def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file",
"def _generate_class_rst(cls):\n if not inspect.isclass(cls):\n raise TypeError(\"Expecting class, got {}\".format(type(cls)))\n\n cls_qualname = _get_public_class_name(cls)\n rst_header = cls_qualname.split(\".\")[-1]\n rst_module = \".\".join(cls_qualname.split(\".\")[:-1])\n rst_header = \"\".join([\".. _\", rst_header, \"_api:\"])\n\n def write_rubric(o, indent, rubric_display, rubric_tag, cls_qualname):\n _write_line(o, indent + \".. rubric:: \" + rubric_display)\n _write_empty_line(o)\n _write_line(o, indent + \".. autoautosummary:: \" + cls_qualname)\n _write_line(o, indent + indent + \":\" + rubric_tag + \":\")\n _write_empty_line(o)\n\n with io.StringIO() as output:\n # Attributes\n all_attributes = _get_filtered_names(cls, _is_class_property)\n # Methods, separated into public/private\n all_methods = _get_filtered_names(cls, _is_class_method)\n all_public_methods = []\n all_private_methods = []\n for _name in all_methods:\n if _name.startswith(\"_\"):\n all_private_methods.append(_name)\n else:\n all_public_methods.append(_name)\n\n _write_line(output, rst_header)\n _write_empty_line(output)\n _write_marquee(output, cls_qualname)\n _write_empty_line(output)\n\n _write_line(output, \".. currentmodule:: \" + rst_module)\n _write_empty_line(output)\n\n _write_line(output, \".. autoclass:: \" + cls_qualname)\n _write_empty_line(output)\n\n indent = \" \"\n attributes_header = \"Attributes\"\n private_methods_header = \"Private methods\"\n public_methods_header = \"Public methods\"\n\n if all_attributes:\n write_rubric(\n output,\n indent,\n attributes_header + \":\",\n \"attributes\",\n cls_qualname,\n )\n if all_public_methods:\n write_rubric(\n output,\n indent,\n public_methods_header + \":\",\n \"methods\",\n cls_qualname,\n )\n if all_private_methods:\n write_rubric(\n output,\n indent,\n private_methods_header + \":\",\n \"private_methods\",\n cls_qualname,\n )\n\n _write_empty_line(output)\n\n if all_attributes:\n _write_underlined(output, attributes_header, \"-\")\n _write_empty_line(output)\n for n in all_attributes:\n _write_line(\n output,\n \".. autoattribute:: \" + \".\".join([cls_qualname, n]),\n )\n _write_empty_line(output)\n\n if all_public_methods:\n _write_underlined(output, public_methods_header, \"-\")\n _write_empty_line(output)\n for n in all_public_methods:\n _write_line(\n output,\n \".. autofunction:: \" + \".\".join([cls_qualname, n]),\n )\n _write_empty_line(output)\n\n # Private methods\n if all_private_methods:\n _write_underlined(output, private_methods_header, \"-\")\n _write_empty_line(output)\n for n in all_private_methods:\n _write_line(\n output,\n \".. autofunction:: \" + \".\".join([cls_qualname, n]),\n )\n return output.getvalue()",
"def get_doc(self) -> Documentation:\n r : Documentation = [self.get_doc_string()]\n r_src = \"\"\n if hasattr(self,\"_path\"): r_src += \"locally at '%s'\" % (str(self._path))\n if self.url is not None: r_src += \" remote url(orig) '%s'\" % (self.url)\n r_src += \" remote url(parsed) '%s'\" % (self.git_url.as_string())\n if self.branch is not None: r_src += \" branch '%s'\" % (self.branch)\n r.append(r_src)\n r_stages = []\n for (sn,s) in self.stages.items():\n r_stages.append(sn)\n pass\n r_stages.sort()\n if len(r_stages)>0:\n r.append(\"Stages: %s\"%(\" \".join(r_stages)))\n pass\n return r",
"def _make_source(name, init, body):\n code = \"\"\"\n #include <Python.h>\n\n %(body)s\n\n PyMODINIT_FUNC\n PyInit_%(name)s(void) {\n %(init)s\n }\n \"\"\" % dict(\n name=name, init=init, body=body,\n )\n return code",
"def generate():",
"def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res",
"def gen_python_api(json_data, model_name,model_version,endpoint=\"http://127.0.0.1:8400\"):\n\n code_template = \"\"\"#!/usr/bin/env python\n\n import requests\n\n def main():\n #endpoint = \"http://127.0.0.1:8000\"\n endpoint = {{endpoint}}\n param={\"model_name\": \"{{ model_name }}\", \"model_version\": \"{{ model_version }}\"}\n json_data = {{json_data}}\n result = requests.post(endpoint, param=param,json=json_data)\n print(result.text)\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n\n generated_tensor_data_string = json.dumps(json_data)\n template = Template(code_template)\n generate_code = template.render(\n model_name=model_name, model_version=model_version,json_data=generated_tensor_data_string,endpoint=endpoint)\n logging.debug(\"Generate the code in Python:\\n{}\".format(generate_code))\n return generate_code",
"def build_python_api_main(outputdir, components):\n mainrst_filename = Path(outputdir, 'index.rst')\n # list documented (python) packages\n docpython_dir = Path(outputdir, 'python')\n packages = [f for f in docpython_dir.glob('*')]\n packages = [p.name for p in packages]\n # (ugly) trick to print components in the expected order.\n pack = {}\n for p in components:\n for pname in packages:\n if pname.count(p) > 0:\n pack[pname] = components[p]\n packages = [p[0] for p in sorted(pack.items(), key=operator.itemgetter(1))] \n\n if len(packages) > 0:\n with open(mainrst_filename, 'a') as f:\n # label = '.. _siconos_python_reference:\\n\\n\\n'\n title = 'Siconos Python API reference'\n title += '\\n' + len(title) * '#' + '\\n\\n'\n title += 'This is the documentation of '\n title += '`python <https://www.python.org/>`_ '\n title += 'interface to Siconos.\\n\\n\\n'\n header = '.. toctree::\\n :maxdepth:3\\n\\n'\n f.write(title)\n f.write(header)\n for p in packages:\n if p in modules_docs:\n title = p.replace('_','.') + ': ' + modules_docs[p]\n directive = title + ' <python/' + p + '/autodoc>\\n'\n else:\n directive = 'python/' + p + '/autodoc\\n\\n'\n directive = textwrap.indent(directive, ' ')\n f.write(directive)\n f.write('\\n')",
"def __buildDocumentClassDocString():\n\n # build a dictionary of tags and their descriptions, seems a little over\n # the top, but keeps all the information in one place\n tagsStrings = {\n \"comment\" : \"Define the comment string\",\n \"define\" : \"Define the symbol name for #define's\",\n \"info\" : \"Information string, to end up in the 'info' output\",\n \"instance\" : \"Instance name\",\n \"matlabRoot\" : \"Name of variable used by the matlab output\",\n \"members\" : \"List of symbols, which are going to be children of this symbol\",\n \"name\" : \"Name of this symbol\",\n \"size\" : \"Size of this symbol, i.e. indicate it is an array\",\n \"subtype\" : \"Define the actual type of general symbol\",\n \"symbol\" : \"Define a symbol, either a top level entity a child in a members\",\n \"test\" : \"Define the preprocessor test\",\n \"text\" : \"Text to put into a banner symbol\",\n \"title\" : \"Set the overall document title\",\n \"value\" : \"Define a value for this symbol\",\n \"valuesRequired\" : \"Does the enumeration allow automatic value assignment in entries\",\n }\n # build the list of classes\n classes = dict(filter(lambda (k,v): type(v) == types.ClassType, globals().iteritems()))\n (tagsUsed, optionsUsed) = buildKeys(classes)\n\n # build the string we are going to add to the document class\n s = \"Document class that represents the XML document and contains the data.\\n\\n\"\n s += \"Available tags:\\n\"\n\n for tag in tagsStrings:\n try:\n used = \" Required by : %s\\n\" % (\", \".join(tagsUsed[tag]))\n except KeyError:\n used = \"\"\n try:\n opts = \" Optional for: %s\\n\" % (\", \".join(optionsUsed[tag]))\n except KeyError:\n opts = \"\"\n s += \" %s\\n %s\\n %s\\n\\n%s%s\\n\" % (tag, \"-\"*len(tag), tagsStrings[tag], used, opts)\n\n return s"
]
| [
"0.6348354",
"0.61365306",
"0.604517",
"0.5923929",
"0.5889454",
"0.58836114",
"0.58230215",
"0.5807948",
"0.57579327",
"0.5728273",
"0.5695438",
"0.5687779",
"0.56875557",
"0.5671",
"0.56369007",
"0.5624715",
"0.55779654",
"0.55236787",
"0.5504747",
"0.5488637",
"0.54666436",
"0.54519117",
"0.54493517",
"0.54440135",
"0.5412374",
"0.54087794",
"0.54059374",
"0.54057556",
"0.5395799",
"0.5384657"
]
| 0.61944115 | 1 |
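
A driver sketch for the `gen_capi` record above. The function only needs an object exposing a `header` attribute plus ctags on the PATH; the header path and the output file naming here are assumptions, not taken from the record.

    # Hedged sketch: run gen_capi from an argparse namespace and write one
    # RST file per namespace section it returns.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--header", default=None)
    args = parser.parse_args(["--header", "include/libnvm.h"])  # illustrative

    for ns, rst in gen_capi(args).items():  # empty dict when ctags/header fail
        with open("capi_%s.rst" % ns, "w") as rst_file:
            rst_file.write(rst)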
Generates code for lhs_ast = new_ast, where new_ast is an array initializer of either Type[expr1][expr2]...[exprN] or { { { ... } } }. Currently only supports Type[expr1]. | def new_array(lhs_ast, new_ast):
    assert(new_ast.tag == 'NEW_ARRAY')
    rank = new_ast.rank
    assert(rank > 0)
    type_name = new_ast.type_name
    array_var = checker.new_temp()
    env = {}
    template = '{\n'
    template += 'var %s%s %s;\n' % (type_name, '[]'*rank, array_var)
    if rank == 1:
        template += new_array1(new_ast, array_var, env)
    # elif rank == 2:
    #     template += new_array2(new_ast, array_var)
    else:
        checker.errors.add(new_ast.coord, 'UNINMP',
                           'array initialization for rank > 1 not implemented')
        return Ast('BLOCK', coord=new_ast.coord)
    env['$lhs'] = lhs_ast
    template += '$lhs = %s;\n' % array_var
    template += '}\n'
    return Template.substitute('block', template, env) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAstNode_newTypeInstance(funcEnv, objType, argAst=None, argType=None):\n interpreter = funcEnv.interpreter\n origObjType = objType\n while isinstance(objType, CTypedef):\n objType = objType.type\n while isinstance(argType, CTypedef):\n argType = argType.type\n\n if isinstance(objType, CBuiltinType) and objType.builtinType == (\"void\",):\n # It's like a void cast. Return None.\n if argAst is None:\n return NoneAstNode\n tup = ast.Tuple(elts=(argAst, NoneAstNode), ctx=ast.Load())\n return getAstNodeArrayIndex(tup, 1)\n\n arrayLen = None\n if isinstance(objType, CArrayType):\n arrayOf = getAstNodeForVarType(funcEnv, objType.arrayOf)\n if objType.arrayLen:\n arrayLen = getConstValue(interpreter.globalScope.stateStruct, objType.arrayLen)\n assert arrayLen is not None\n if isinstance(argType, (tuple, list)):\n assert arrayLen == len(argType)\n else:\n # Handle array type extra here for the case when array-len is not specified.\n assert argType is not None\n if isinstance(argType, (tuple, list)):\n arrayLen = len(argType)\n else:\n assert isinstance(argType, CArrayType)\n arrayLen = getConstValue(interpreter.globalScope.stateStruct, argType.arrayLen)\n assert arrayLen is not None\n # Write back to type so that future getCType calls will succeed.\n objType.arrayLen = CNumber(arrayLen)\n\n typeAst = ast.BinOp(left=arrayOf, op=ast.Mult(), right=ast.Num(n=arrayLen))\n else:\n typeAst = getAstNodeForVarType(funcEnv, origObjType)\n\n if isinstance(argType, (tuple, list)): # CCurlyArrayArgs\n assert isinstance(argAst, ast.Tuple)\n assert len(argAst.elts) == len(argType)\n # There is a bit of inconsistency between basic types init\n # (like c_int), which must get a value (int),\n # and ctypes.Structure/ctypes.ARRAY, which for some field can either\n # get a value (int) or a c_int. 
For pointers, it must get\n # the var, not the value.\n # This is mostly the same as for calling functions.\n f_args = []\n while isinstance(objType, CTypedef):\n objType = objType.type\n if isinstance(objType, CStruct):\n for c in objType.body.contentlist:\n if not isinstance(c, CVarDecl): continue\n f_args += [c.type]\n elif isinstance(objType, CArrayType):\n f_args += [objType.arrayOf] * arrayLen\n else:\n assert False, \"did not expect type %r\" % objType\n assert len(argType) <= len(f_args)\n # Somewhat like autoCastArgs():\n s_args = []\n for f_arg_type, s_arg_ast, s_arg_type in zip(f_args, argAst.elts, argType):\n s_arg_ast = _makeVal(funcEnv, f_arg_type, s_arg_ast, s_arg_type)\n s_args += [s_arg_ast]\n return makeAstNodeCall(typeAst, *s_args)\n\n if isinstance(objType, CArrayType) and isinstance(argType, CArrayType):\n return ast.Call(func=typeAst, args=[], keywords=[], starargs=argAst, kwargs=None)\n\n if isinstance(argType, CWrapFuncType):\n if isVoidPtrType(objType):\n vAst = getAstNode_newTypeInstance(\n funcEnv, CFuncPointerDecl(type=argType.func.type, args=argType.func.args),\n argAst=argAst, argType=argType)\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, vAst, typeAst)\n if isinstance(objType, CWrapFuncType):\n return argAst\n assert isinstance(objType, CFuncPointerDecl) # what other case could there be?\n return makeAstNodeCall(getAstNodeAttrib(\"helpers\", \"makeFuncPtr\"), typeAst, argAst)\n\n if isinstance(objType, CPointerType) and usePyRefForType(objType.pointerOf):\n # We expect a PyRef.\n return makeAstNodeCall(getAstNodeAttrib(\"helpers\", \"PyRef\"),\n *([getAstNodeAttrib(argAst, \"ref\")] if argAst else []))\n\n if isPointerType(objType, checkWrapValue=True) and isPointerType(argType, checkWrapValue=True):\n # We can have it simpler. 
This is even important in some cases\n # were the pointer instance is temporary and the object\n # would get freed otherwise!\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n if isSameType(interpreter.globalScope.stateStruct, objType, ctypes.c_void_p) and \\\n isinstance(argType, CFuncPointerDecl):\n # We treat CFuncPointerDecl not as a normal pointer.\n # However, we allow casts to c_void_p.\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n if isinstance(objType, CFuncPointerDecl) and isinstance(argType, CFuncPointerDecl):\n # We did not allow a pointer-to-func-ptr cast above.\n # But we allow func-ptr-to-func-ptr.\n astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n return makeAstNodeCall(astCast, argAst, typeAst)\n\n args = []\n if argAst is not None:\n if isinstance(argAst, (ast.Str, ast.Num)):\n args += [argAst]\n elif argType is not None:\n args += [getAstNode_valueFromObj(interpreter._cStateWrapper, argAst, argType)]\n else:\n # expect that it is the AST for the value.\n # there is no really way to 'assert' this.\n args += [argAst]\n\n if isPointerType(objType, checkWrapValue=True) and argAst is not None:\n # Note that we already covered the case where both objType and argType\n # are pointer types, and we get a ctypes pointer object.\n # In that case, we can use ctypes.cast, which is more or less safe.\n # Note what this case here means:\n # We get an integer from somewhere, and interpret is as a pointer.\n # So, if there is a bug in how we got this integer, this can\n # potentially lead to an invalid pointer and hard to find bug.\n # Also, if the memory was allocated before by Python,\n # normally the ctypes pointer handling would keep a reference\n # to the underlying Python object.\n # When we however just get the raw pointer address as an integer\n # and then convert that back to a pointer at this place,\n # it doesn't know about the underlying Python objects.\n # When the underlying Python objects will get out-of-scope\n # at some later point, which we cannot control here,\n # this again would lead to hard to find bugs.\n assert len(args) == 1\n return makeAstNodeCall(getAstNodeAttrib(\"intp\", \"_getPtr\"), args[0], typeAst)\n #astVoidPT = getAstNodeAttrib(\"ctypes\", \"c_void_p\")\n #astCast = getAstNodeAttrib(\"ctypes\", \"cast\")\n #astVoidP = makeAstNodeCall(astVoidPT, *args)\n #return makeAstNodeCall(astCast, astVoidP, typeAst)\n\n if isIntType(objType) and args:\n # Introduce a Python int-cast, because ctypes will fail if it is a float or so.\n assert len(args) == 1\n args = [makeAstNodeCall(ast.Name(id=\"int\", ctx=ast.Load()), *args)]\n if isinstance(objType, (CStruct, CUnion)) and argAst:\n # We get the object itself. 
We expect that this is supposed to be a copy.\n # However, there is no such thing as a copy constructor.\n assert len(args) == 1\n return makeAstNodeCall(Helpers.assign, makeAstNodeCall(typeAst), *args)\n if isinstance(objType, CVariadicArgsType):\n if argAst:\n return makeAstNodeCall(Helpers.VarArgs, argAst)\n assert isinstance(funcEnv.astNode, ast.FunctionDef)\n # TODO: Normally, we would assign the var via va_start().\n # However, we just always initialize with the varargs tuple also already here\n # because we have the ref to the real varargs here.\n # See globalincludewrappers.\n return makeAstNodeCall(\n Helpers.VarArgs,\n ast.Name(id=funcEnv.astNode.args.vararg or \"None\", ctx=ast.Load()),\n ast.Name(id=\"intp\", ctx=ast.Load()))\n return makeAstNodeCall(typeAst, *args)",
"def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts",
"def makeit_ssa(exprs):\n # Identify recurring LHSs\n seen = {}\n for i, e in enumerate(exprs):\n seen.setdefault(e.lhs, []).append(i)\n # Optimization: don't waste time reconstructing stuff if already in SSA form\n if all(len(i) == 1 for i in seen.values()):\n return exprs\n # SSA conversion\n c = 0\n mapper = {}\n processed = []\n for i, e in enumerate(exprs):\n where = seen[e.lhs]\n rhs = e.rhs.xreplace(mapper)\n if len(where) > 1:\n needssa = e.is_Scalar or where[-1] != i\n lhs = Symbol(name='ssa%d' % c, dtype=e.dtype) if needssa else e.lhs\n if e.is_Increment:\n # Turn AugmentedAssignment into Assignment\n processed.append(e.func(lhs, mapper[e.lhs] + rhs, is_Increment=False))\n else:\n processed.append(e.func(lhs, rhs))\n mapper[e.lhs] = lhs\n c += 1\n else:\n processed.append(e.func(e.lhs, rhs))\n return processed",
"def create_Assign(left_hand_side, right_hand_side):\n right_hand_side.ctx = ast.Load()\n left_hand_side.ctx = ast.Store()\n return ast.Assign(targets=[left_hand_side], value=right_hand_side)",
"def parse_ast_args(cls, ast_args: List) -> Union[tree.AstNode, List[tree.AstNode]]:\n if cls == tree.Declaration and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). We want two separate variable declarations.\n if(ast_args[2] == '['):\n var_type, identifier, _, valInt, _ = ast_args\n ast_args[0] = ast_args[0] + str(tree.Identifier(valInt.value))\n ast_args = ast_args[: 2]\n else:\n print(ast_args)\n var_type, identifier, expr = ast_args\n if isinstance(expr, tree.Assignment):\n # We should raise an error somehow if there's no previous declaration of the variable here.\n # A good solution would maintain a mapping to the original source code so we can show where the error is.\n # We want to move the assignment node one up so it is **sibling** to this declaration node.\n # Then the declaration should be made with the value of the assigned variable.\n ast_args[2] = tree.Identifier(expr.identifier.name)\n return [expr, parse_ast_args(cls, ast_args)]\n\n if cls == tree.Function:\n # Sometimes we don't have function arguments. I don't know how to handle it but here, rearranging args order.\n assert len(ast_args) in {3, 4}\n if len(ast_args) == 4:\n # Swap function args and body so it works with our class' constructor default args.\n ast_args[2], ast_args[3] = ast_args[3], ast_args[2]\n\n if cls == tree.Expr and any(op in ast_args for op in tree.BinOp.OPERATORS):\n # We want to parse 4 / 3 * 2 with left-associativity. (it should output 2)\n # It means we need to parse the multiplication first\n *left_hand_side, op, right_hand_side = ast_args\n assert op in tree.BinOp.OPERATORS, \"Operator should be in second place in the token list\"\n\n if len(left_hand_side) > 1:\n # We need to parse something like 1 + 2 + 3 + 4\n left_hand_side = parse_ast_args(cls, left_hand_side)\n else:\n # The right hand side is a single expression, it was already parsed into an ast.\n left_hand_side = left_hand_side[0]\n\n return tree.BinOp(left_hand_side, op, right_hand_side)\n\n # We 'unnest' the structure - these classes are abstract so we are rly interested in what they contain.\n if cls == tree.Expr:\n assert len(ast_args) == 1\n return ast_args[0]\n if cls == tree.Statement:\n return ast_args[0] if ast_args else None\n\n # Hack. Esp since some 'class_name' refer to functions.\n if \"\\t\" in ast_args:\n ast_args.remove(\"\\t\")\n\n if cls == tree.Assignment and len(ast_args) >= 3:\n # We deal with chained declarations here (`int a = b = 1;`). We want two separate variable declarations.\n if(ast_args[1] == '['):\n identifier, _, valInt, _, expres = ast_args\n identifier.name = identifier.name + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args[1] = expres\n ast_args = ast_args[: 2]\n\n if cls == tree.Identifier and len(ast_args) > 1:\n if (ast_args[1] == '['):\n identifier, _, valInt, _ = ast_args\n tmp = str(valInt)[:10]\n if tmp == \"Identifier\":\n identifier = identifier + \"[\" + valInt.name + \"]\"\n else:\n identifier = identifier + \"[\" + str(valInt.value) + \"]\"\n ast_args[0] = identifier\n ast_args = ast_args[: 1]\n\n return cls(*ast_args)",
"def gen_global_initialize_expression(self, typ, expr):\n cval = self._constant_evaluator.eval_expr(expr)\n\n if isinstance(cval, tuple):\n assert cval[0] is ir.ptr and len(cval) == 2\n mem = (cval,)\n else:\n mem = (self.context.pack(typ, cval),)\n return mem",
"def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()",
"def translate_to_c(Newast):\n ast = parse_file('exampleMin.c', use_cpp=True)\n\n ast.show()\n #print(\"newast: \", Newast.ext[0].decl.type.args.params[0].type.type==ast.ext[0].decl.type.args.params[0].type.type)\n #print(\"newast2: \", Newast.ext[0].decl.type.args.params[0].type.type.coord)\n #print(\"ast2: \", ast.ext[0].decl.type.args.params[0].type.type.coord)\n\n #Newast.show()\n \n # print(ast.ext[0].decl.bitsize)\n # print(Newast.ext[0].decl.bitsize)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.coord)\n # print(Newast.ext[0].decl.type.args.coord)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params)\n # print(Newast.ext[0].decl.type.args.params)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0])\n # print(Newast.ext[0].decl.type.args.params[0])\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type)\n # print(Newast.ext[0].decl.type.args.params[0].type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type)\n # print(Newast.ext[0].decl.type.args.params[0].type.type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type.names)\n # print(Newast.ext[0].decl.type.args.params[0].type.type.names)\n # print(\"----------------------------------\")\n\n generator = c_generator.CGenerator()\n #ast.show()\n\n # tracing the generator for debugging\n # import trace\n # tr = trace.Trace(countcallers=1)\n # tr.runfunc(generator.visit, Newast)\n # tr.results().write_results()\n\n print(generator.visit(Newast))",
"def _compat_assign_gast_5(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value, type_comment=type_comment)",
"def literal_array(cls, elems):\n tys = [el.type for el in elems]\n if len(tys) == 0:\n raise ValueError(\"need at least one element\")\n ty = tys[0]\n for other in tys:\n if ty != other:\n raise TypeError(\"all elements must have the same type\")\n return cls(types.ArrayType(ty, len(elems)), elems)",
"def evaluateStructure(compiled_expression):",
"def ast_to_blitz_expr(ast_seq):\n # Don't overwrite orignal sequence in call to transform slices.\n ast_seq = copy.deepcopy(ast_seq)\n slice_handler.transform_slices(ast_seq)\n\n # Build the actual program statement from ast_seq\n expr = ast_tools.ast_to_string(ast_seq)\n\n # Now find and replace specific symbols to convert this to\n # a blitz++ compatible statement.\n # I'm doing this with string replacement here. It could\n # also be done on the actual ast tree (and probably should from\n # a purest standpoint...).\n\n # this one isn't necessary but it helps code readability\n # and compactness. It requires that\n # Range _all = blitz::Range::all();\n # be included in the generated code.\n # These could all alternatively be done to the ast in\n # build_slice_atom()\n expr = expr.replace('slice(_beg,_end)', '_all')\n expr = expr.replace('slice', 'blitz::Range')\n expr = expr.replace('[','(')\n expr = expr.replace(']', ')')\n expr = expr.replace('_stp', '1')\n\n # Instead of blitz::fromStart and blitz::toEnd. This requires\n # the following in the generated code.\n # Range _beg = blitz::fromStart;\n # Range _end = blitz::toEnd;\n #expr = expr.replace('_beg', 'blitz::fromStart' )\n #expr = expr.replace('_end', 'blitz::toEnd' )\n\n return expr + ';\\n'",
"def stmts_to_stmt(statements):\n if len(statements) == 1:\n return statements[0]\n array = FakeArray(statements, arr_type=pr.Array.NOARRAY)\n return FakeStatement([array])",
"def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))",
"def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))",
"def gen_load_code(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n\n if tys.startswith(\"Set[\"):\n assert tys.endswith(\"]\")\n inside = tys[4:-1]\n ans = f\"{var_name} = set(json.load(sys.stdin))) # convert set (stored as json dictionary)\"\n assertions = [f\"all(isinstance(x, {inside}) for x in {var_name})\"]\n else:\n ans = f\"{var_name} = json.load(sys.stdin)\"\n num_lists = tys.count(\"List[\")\n assert tys.startswith(\"List[\" * num_lists) and tys.endswith(\"]\" * num_lists)\n inside = tys[5 * num_lists: len(tys) - num_lists]\n if num_lists == 0:\n assertions = [f\"isinstance({var_name}, {inside})\"]\n else:\n assertions = [f\"isinstance({var_name}, list)\"]\n if num_lists == 1:\n assertions.append(f\"all(isinstance(x, {inside}) for x in {var_name})\")\n else:\n assertions.append(f\"all(isinstance(x, list) for x in {var_name})\")\n if num_lists == 2:\n assertions.append(f\"all(isinstance(y, {inside}) for x in {var_name} for y in x)\")\n elif num_lists == 3:\n assertions += [f\"all(isinstance(y, list) for x in {var_name} for y in x)\",\n f\"all(isinstance(z, {inside}) for x in {var_name} for y in x for z in y)\"]\n else:\n assert False, f'Unknown type {tys}'\n\n assert inside in [\"int\", \"float\", \"bool\", \"str\"], f'Unknown type {tys}'\n return ans + \"\\n\\n\" + \"\\n\".join(f\"assert {a}, 'Type error: expecting `{tys}`'\" for a in assertions)",
"def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)",
"def visit_code(self, code):\n\n def build_tuple(tup):\n out = []\n for e in tup:\n if isinstance(e, tuple):\n out.append(build_tuple(e))\n else:\n out.append(('prim', type(e)))\n return ('tuple', tuple(out))\n\n folds = _FoldedOps()\n for block in code.order:\n stack = _Stack()\n for op in block:\n if isinstance(op, opcodes.LOAD_CONST):\n elt = code.consts[op.arg]\n if isinstance(elt, tuple):\n typ = build_tuple(elt)\n stack.push(_Constant(typ, elt, typ[1], op))\n else:\n stack.push(_Constant(('prim', type(elt)), elt, None, op))\n elif isinstance(op, opcodes.BUILD_LIST):\n stack.build(list, op)\n elif isinstance(op, opcodes.BUILD_SET):\n stack.build(set, op)\n elif isinstance(op, opcodes.FORMAT_VALUE):\n if op.arg & loadmarshal.FVS_MASK:\n stack.build_str(2, op)\n else:\n stack.build_str(1, op)\n elif isinstance(op, opcodes.BUILD_STRING):\n stack.build_str(op.arg, op)\n elif isinstance(op, opcodes.BUILD_MAP):\n map_ = stack.fold_map_args(op.arg, op)\n if map_:\n typ = ('map', (map_.key_types, map_.value_types))\n val = dict(zip(map_.keys, map_.values))\n stack.push(_Constant(typ, val, map_.elements, op))\n elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):\n keys = stack.pop()\n vals = stack.fold_args(op.arg, op)\n if vals:\n keys.op.folded = op\n _, t = keys.typ\n typ = ('map', (frozenset(t), vals.types))\n val = dict(zip(keys.value, vals.values))\n elements = dict(zip(keys.value, vals.elements))\n stack.push(_Constant(typ, val, elements, op))\n elif isinstance(op, opcodes.LIST_APPEND):\n elements = stack.fold_args(2, op)\n if elements:\n lst, element = elements.elements\n tag, et = lst.typ\n assert tag == 'list'\n typ = (tag, et | {element.typ})\n value = lst.value + [element.value]\n elements = lst.elements + (element,)\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.LIST_EXTEND):\n elements = stack.fold_args(2, op)\n if elements:\n lst, other = elements.elements\n tag, et = lst.typ\n assert tag == 'list'\n other_tag, other_et = other.typ\n if other_tag == 'tuple':\n # Deconstruct the tuple built in opcodes.LOAD_CONST above\n other_elts = tuple(_Constant(('prim', e), v, None, other.op)\n for (_, e), v in zip(other_et, other.value))\n elif other_tag == 'prim':\n assert other_et == str\n other_et = {other.typ}\n other_elts = tuple(_Constant(('prim', str), v, None, other.op)\n for v in other.value)\n else:\n other_elts = other.elements\n typ = (tag, et | set(other_et))\n value = lst.value + list(other.value)\n elements = lst.elements + other_elts\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.MAP_ADD):\n elements = stack.fold_args(3, op)\n if elements:\n map_, key, val = elements.elements\n tag, (kt, vt) = map_.typ\n assert tag == 'map'\n typ = (tag, (kt | {key.typ}, vt | {val.typ}))\n value = {**map_.value, **{key.value: val.value}}\n elements = {**map_.elements, **{key.value: val}}\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.DICT_UPDATE):\n elements = stack.fold_args(2, op)\n if elements:\n map1, map2 = elements.elements\n tag1, (kt1, vt1) = map1.typ\n tag2, (kt2, vt2) = map2.typ\n assert tag1 == tag2 == 'map'\n typ = (tag1, (kt1 | kt2, vt1 | vt2))\n value = {**map1.value, **map2.value}\n elements = {**map1.elements, **map2.elements}\n stack.push(_Constant(typ, value, elements, op))\n else:\n # If we hit any other bytecode, we are no longer building a literal\n # constant. 
Insert a None as a sentinel to the next BUILD op to\n # not fold itself.\n stack.push(None)\n\n # Clear the stack to save any folded constants before exiting the block\n stack.clear()\n\n # Now rewrite the block to replace folded opcodes with a single\n # LOAD_FOLDED_CONSTANT opcode.\n out = []\n for op in block:\n if id(op) in stack.consts:\n t = stack.consts[id(op)]\n arg = t\n pretty_arg = t\n o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)\n o.next = op.next\n o.target = op.target\n o.block_target = op.block_target\n o.code = op.code\n op.folded = o\n folds.add(op)\n out.append(o)\n elif op.folded:\n folds.add(op)\n else:\n out.append(op)\n block.code = out\n\n # Adjust 'next' and 'target' pointers to account for folding.\n for op in code.code_iter:\n if op.next:\n op.next = folds.resolve(op.next)\n if op.target:\n op.target = folds.resolve(op.target)\n return code",
"def _generate_coral_ast(node, names={}):\n if isinstance(node, ast.FunctionDef):\n args = [name.id for name in node.args.args]\n for arg in args:\n names[arg] = cast.VocabLiteral(arg)\n body = [_generate_coral_ast(b, names) for b in node.body]\n expr = cast.LabelingFunction(body, args)\n return expr\n if isinstance(node, ast.Return):\n return cast.Return(_generate_coral_ast(node.value, names))\n if isinstance(node, ast.If):\n cond = _generate_coral_ast(node.test, names)\n true_branch = _generate_coral_ast(node.body[0], names)\n expr = cast.IfThen(cond, true_branch)\n vprint(expr)\n return expr\n if isinstance(node, ast.Compare):\n left = _generate_coral_ast(node.left, names)\n right = _generate_coral_ast(node.comparators[0], names)\n op = node.ops[0]\n if isinstance(op, ast.Eq):\n expr = cast.Equal(left, right)\n vprint(expr)\n return expr\n elif isinstance(op, ast.Gt):\n expr = cast.GreaterThan(left, right)\n vprint(expr)\n return expr\n elif isinstance(op, ast.Lt):\n expr = cast.LessThan(left, right)\n vprint(expr)\n return expr\n elif isinstance(op, ast.LtE):\n expr = cast.LessThanOrEqual(left, right)\n vprint(expr)\n return expr\n elif isinstance(op, ast.GtE):\n expr = cast.GreaterThanOrEqual(left, right)\n vprint(expr)\n return expr\n if isinstance(node, ast.BinOp):\n if isinstance(node.op, ast.Add):\n expr = cast.Add(_generate_coral_ast(node.left, names), _generate_coral_ast(node.right,\n names))\n elif isinstance(node.op, ast.Mult):\n expr = cast.Multiply(_generate_coral_ast(node.left, names),\n _generate_coral_ast(node.right, names))\n if isinstance(node.op, ast.Sub):\n expr = cast.Subtract(_generate_coral_ast(node.left, names),\n _generate_coral_ast(node.right, names))\n vprint(expr)\n return expr\n if isinstance(node, ast.Name):\n if node.id == \"True\":\n expr = cast.TrueLabelLiteral()\n elif node.id == \"False\":\n expr = cast.FalseLabelLiteral()\n elif node.id == \"None\":\n expr = cast.AbstainLabelLiteral()\n else:\n expr = names[node.id]\n vprint(expr)\n return expr\n if isinstance(node, ast.Num):\n return cast.PythonLiteral(node.n)",
"def __init__(self, symbol, index):\n Expression.__init__(self, None)\n self.symbol = symbol\n self.index = index\n self.basetype = symbol.basetype\n\n if(index != None):\n #Call to element in array so change the basetype to the array's basetype\n self.basetype = symbol.basetype.basetype",
"def _ast_genast(tree, specific=None):\n if isinstance(tree, AST):\n if specific is not None:\n val = specific(tree)\n if val is not None:\n return val\n params = []\n for _, value in iter_fields(tree):\n params.append(ast_genast(value, specific))\n return Call(func=Name(tree.__class__.__name__, Load()), args=params, keywords=[])\n if isinstance(tree, list):\n elems = []\n for e in tree:\n elems.append(ast_genast(e, specific))\n return List(elems, Load())\n if tree is None or tree is True or tree is False:\n return NameConstant(tree)\n if isinstance(tree, (int, float, complex)):\n return Num(tree)\n if isinstance(tree, str):\n return Str(tree)\n return tree",
"def gen_compound_literal(self, expr: expressions.CompoundLiteral):\n # Alloc some room:\n ir_addr = self.emit_alloca(expr.typ)\n # ... and fill compound literal:\n self.gen_local_init(ir_addr, expr.typ, expr.init)\n return ir_addr",
"def _parse_array(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Array, registry_exprs: Mapping[mapry.Class, str],\n auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str:\n uid = auto_id.next_identifier()\n\n item_parsing = _parse_value(\n value_expr=\"item_{uid}\".format(uid=uid),\n target_expr=\"target_item_{uid}\".format(uid=uid),\n ref_parts=ref_parts + [\"str(i_{uid})\".format(uid=uid)],\n a_type=a_type.values,\n registry_exprs=registry_exprs,\n auto_id=auto_id,\n py=py)\n\n return _PARSE_ARRAY_TPL.render(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n uid=uid,\n minimum_size=a_type.minimum_size,\n maximum_size=a_type.maximum_size,\n value_py_type=mapry.py.generate.type_repr(a_type=a_type.values, py=py),\n item_parsing=item_parsing).rstrip('\\n')",
"def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)",
"def g(t1):\n if isinstance(t1, IdentExp) and self.st.has_key(t1.name):\n ninfo = self.st[t1.name]\n if ninfo[\"srcty\"] == \"vector\":\n if self.existMats and t1.name == n.exp.lhs.name:\n return ArrayRefExp(t1, IdentExp(\"i2\"))\n else:\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n return ArrayRefExp(t1, IdentExp(\"i1\"))\n elif ninfo[\"srcty\"] == \"matrix\":\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n self.st[\"itrs\"][1].update({ninfo[\"len\"][1]: \"i2\"})\n sub = s2t(\"exp\", ninfo[\"len\"][0] + \" * i2 + i1\")\n return ArrayRefExp(t1, sub)\n else:\n return t1\n else:\n return t1",
"def process_expression_ast(stmt_ast: ast.Expr, stmt_ast_parent_block):\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(f\"Instantiating a symbolic state for AST instance stmt_ast = {stmt_ast}\")\n # initialise empty list of symbols\n all_symbols: list = []\n # walk the ast to find the symbols used\n for walked_ast in ast.walk(stmt_ast):\n # extract information according to type\n if type(walked_ast) is ast.Name:\n all_symbols.append(walked_ast.id)\n \n # instantiate symbolic state\n logger.log.info(f\"Instantiating new StatementSymbolicState instance with symbols {all_symbols}\")\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state",
"def build(self, python_type, op):\n collection = self.fold_args(op.arg, op)\n if collection:\n typename = python_type.__name__\n typ = (typename, collection.types)\n try:\n value = python_type(collection.values)\n except TypeError as e:\n raise ConstantError(f'TypeError: {e.args[0]}', op) from e\n elements = collection.elements\n self.push(_Constant(typ, value, elements, op))",
"def gen_type_assertion(var_name: str, ty: type) -> str:\n\n tys = type_str(ty)\n vars = [c for c in 'abcdefghijklmnop' if c != var_name][::-1]\n\n def helper(var_name, tys):\n tys = tys.strip()\n pre_bracket = tys.split(\"[\")[0].lower() # part before [ (or the entire string if no bracket\n ans = f\"type({var_name}) is {pre_bracket}\"\n if \"[\" in tys:\n inside = tys[tys.index(\"[\") + 1:-1]\n new_var = vars.pop()\n if pre_bracket == \"list\" or pre_bracket == \"set\":\n inside_check = helper(new_var, inside)\n # if \" and \" in inside_check:\n # inside_check = \"(\" + inside_check + \")\"\n ans += f\" and all({inside_check} for {new_var} in {var_name})\"\n elif pre_bracket == \"dict\":\n depth = 0\n for i, c in enumerate(inside):\n if c == \"[\":\n depth += 1\n elif c == \"]\":\n depth -= 1\n elif c == \",\" and depth == 0:\n break\n assert depth == 0 and c == \",\", \"Dict[(expecting comma inside)]\"\n key_var = vars.pop()\n key_check = helper(key_var, tys[:i])\n val_check = helper(new_var, tys[i + 1:])\n ans += f\" and all({key_check} and {val_check} for {key_var}, {new_var} in {var_name}.items())\"\n else:\n assert False, f\"Unknown type `{tys}`\"\n return ans\n\n return f\"assert {helper(var_name, tys)}, '{var_name} must be of type {tys}'\"",
"def generate_ast(\n source_code: str, source_id: int, contract_name: str\n) -> tuple[Settings, vy_ast.Module]:\n return vy_ast.parse_to_ast_with_settings(source_code, source_id, contract_name)",
"def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)"
]
| [
"0.53399026",
"0.5122241",
"0.51023453",
"0.50702715",
"0.5011889",
"0.49890527",
"0.49315548",
"0.49182403",
"0.48647463",
"0.48503703",
"0.48396584",
"0.48342964",
"0.48304525",
"0.4830084",
"0.482372",
"0.48142827",
"0.48136747",
"0.47997293",
"0.4798387",
"0.4798273",
"0.47811186",
"0.47331113",
"0.4723815",
"0.47154543",
"0.47109905",
"0.47056141",
"0.468072",
"0.46736333",
"0.465265",
"0.46491605"
]
| 0.65886825 | 0 |
Creates a shared matrix or vector using the given in_size and out_size. Inputs | def create_shared(out_size, in_size=None, name=None):
if in_size is None:
return theano.shared(np.zeros((out_size, ),dtype=theano.config.floatX), name=name)
else:
return theano.shared(np.zeros((out_size, in_size),dtype=theano.config.floatX), name=name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vector_to_matrix(input, output_shape):\n output_h, output_w = output_shape\n output = np.zeros(output_shape, dtype=input.dtype)\n for i in range(output_h):\n st = i*output_w\n nd = st + output_w\n output[i, :] = input[st:nd]\n # flip the output matrix up-down to get correct result\n output=np.flipud(output)\n return output",
"def CreateSubMatrix(U: numpy.ndarray, input_state: numpy.ndarray, output_state: numpy.ndarray) -> numpy.ndarray:\n\n in_eff_mode = numpy.nonzero(input_state)[0]\n out_eff_mode = numpy.nonzero(output_state)[0]\n\n row_submatrix = col_submatrix = []\n for coor_eff_mode in in_eff_mode:\n row_submatrix = numpy.append(row_submatrix, int(input_state[coor_eff_mode]) * [coor_eff_mode], axis=0)\n\n for coor_eff_mode in out_eff_mode:\n col_submatrix = numpy.append(col_submatrix, int(output_state[coor_eff_mode]) * [coor_eff_mode], axis=0)\n\n dim_U_st = len(row_submatrix)\n U_st = numpy.zeros((dim_U_st, dim_U_st), dtype=complex)\n for i in range(dim_U_st):\n for j in range(dim_U_st):\n coor_row = int(row_submatrix[i])\n coor_col = int(col_submatrix[j])\n U_st[i, j] = U[coor_col, coor_row]\n\n return U_st",
"def vector_to_matrix(input, output_shape):\n output_h, output_w = output_shape\n output = np.zeros(output_shape, dtype=input.dtype)\n for i in range(output_h):\n st = i * output_w\n nd = st + output_w\n output[i, :] = input[st:nd]\n # flip the output matrix up-down to get correct result\n output = np.flipud(output)\n return output",
"def __init__(self, input_size, output_size, pure=False):\n super(RSOM, self).__init__(input_size, output_size, pure)\n self.differences = np.zeros((output_size, input_size))",
"def make_block(self, in_size, out_size, **kwargs):\n raise NotImplementedError(\"Abstract\")",
"def create_matrix(size):\n total_size = size * size\n rand_matrix = np.reshape(\n np.random.choice(\n [0, 1], int(total_size), p=[0.9, 0.1]\n ),\n (size, size)\n )\n return rand_matrix",
"def _huge(filter_mat: np.ndarray,\n output_shape: Tuple[int, int]) -> np.ndarray:\n (N, M) = filter_mat.shape\n (P, Q) = output_shape\n (R, S) = (N + P - 1, M + Q - 1)\n dim = R * S\n huge_mat = np.zeros((dim, dim))\n # obnoxious naive loop\n for i in range(P):\n for j in range(Q):\n for k in range(N):\n for l in range(M):\n huge_mat[i * Q + j][(i + k) * S + (j + l)] = \\\n filter_mat[k][l]\n # we are in row echelon form! so we can just add a standard basis vector\n # for each pivot-less column:\n # http://math.stackexchange.com/questions/1530314/adding-linearly-independent-row-vectors-to-a-matrix\n row = P * Q\n for i in range(P):\n for j in range(Q, S):\n huge_mat[row][i * S + j] = 1.0\n row += 1\n for i in range(P, R):\n for j in range(S):\n huge_mat[row][i * S + j] = 1.0\n row += 1\n return huge_mat",
"def add_dense_layer(self, _input, in_size, out_size):\n weight = tf.Variable(tf.truncated_normal(\n [in_size, out_size], stddev=0.01))\n bias = tf.Variable(tf.constant(0.1, shape=[out_size]))\n return tf.matmul(_input, weight) + bias",
"def initialize_dense(input_size, output_size):\n # TODO: replace the initialization constant 0.01 in a more systematic approach\n return np.random.randn(output_size, input_size) * 0.01",
"def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5",
"def spzeros(size, device='cuda:0'):\n\n return SparseTensor(size=size).to(device=device)",
"def fun_in_and_out_same(self, nc1_index, core_index, h_per_core,\n h_out_index):\n if self.in_size_w*self.c_block_size*4 < UB_SIZE/2:\n ub_output = self.tik_instance.Tensor(\n \"float32\", (self.in_size_w, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_output[0], self.grads_gm[(nc1_index*self.in_size_h +\n core_index*h_per_core +\n h_out_index)*self.in_size_w*16],\n 0, 1, self.in_size_w*2, 0, 0)\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h + core_index *\n h_per_core + h_out_index)*self.out_size_w*16],\n ub_output[0], 0, 1, self.out_size_w*2, 0, 0)\n else:\n w_size_ub = UB_SIZE // (2*4*self.c_block_size)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (w_size_ub, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n w_num_ub = _ceil_div(self.in_size_w, w_size_ub)\n if w_num_ub > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n with self.tik_instance.for_range(\n 0, w_num_ub, thread_num=thread_num) as w_num_index:\n with self.tik_instance.if_scope(w_num_index != w_num_ub - 1):\n self.tik_instance.data_move(\n ub_output[0], self.grads_gm[\n ((nc1_index*self.in_size_h + core_index*h_per_core +\n h_out_index)*self.in_size_w + w_num_index *\n w_size_ub)*16], 0, 1, w_size_ub*2, 0, 0)\n self.tik_instance.data_move(\n self.output_gm[\n ((nc1_index*self.out_size_h + core_index*h_per_core\n + h_out_index)*self.out_size_w + w_num_index *\n w_size_ub)*16], ub_output[0], 0, 1, w_size_ub*2,\n 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_output[0],\n self.grads_gm[((nc1_index*self.in_size_h +\n core_index*h_per_core + h_out_index) *\n self.in_size_w + w_num_index*w_size_ub) *\n 16],\n 0, 1, (self.in_size_w - w_num_index*w_size_ub)*2, 0, 0)\n\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n core_index*h_per_core + h_out_index) *\n self.out_size_w + w_num_index *\n w_size_ub)*16], ub_output[0], 0, 1,\n (self.in_size_w - w_num_index*w_size_ub)*2, 0, 0)",
"def get_warp_matrix(theta, size_input, size_dst, size_target):\n theta = np.deg2rad(theta)\n matrix = np.zeros((2, 3), dtype=np.float32)\n scale_x = size_dst[0] / size_target[0]\n scale_y = size_dst[1] / size_target[1]\n matrix[0, 0] = math.cos(theta) * scale_x\n matrix[0, 1] = -math.sin(theta) * scale_x\n matrix[0, 2] = scale_x * (-0.5 * size_input[0] * math.cos(theta) + 0.5 * size_input[1] * math.sin(theta) + 0.5 * size_target[0])\n matrix[1, 0] = math.sin(theta) * scale_y\n matrix[1, 1] = math.cos(theta) * scale_y\n matrix[1, 2] = scale_y * (-0.5 * size_input[0] * math.sin(theta) - 0.5 * size_input[1] * math.cos(theta) + 0.5 * size_target[1])\n return matrix",
"def create_matrix(sample_size, dim):\n return np.array(private_create_matrix(sample_size, dim, dim))",
"def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)",
"def resize(self, in_size, out_size):\n scaled_origin, scaled_data = resize_origin_and_bitmap(self._origin, self._data, in_size, out_size)\n return MultichannelBitmap(data=scaled_data, origin=scaled_origin)",
"def get_vector(size):\n ret = lib.myarray_construct(size, size)\n return _asarray(ret.data, (ret.n_rows, ret.n_cols))",
"def __init__(self, inputSize, outputSize, hiddenSize): \n\n self.inputSize = inputSize\n self.outputSize = outputSize\n self.hiddenSize = hiddenSize \n \n # Initialize random weight with range [-0.5, 0.5]\n self.weight = np.matrix(np.random.uniform(-0.5, 0.5, (self.hiddenSize, self.inputSize)))\n\n # Initialize random bias with range [0, 1]\n self.bias = np.matrix(np.random.uniform(0, 1, (1, self.hiddenSize)))\n \n self.H = 0\n self.beta = 0",
"def apply_layer(inputs, out_size):\n in_size = inputs.get_shape()[0].value\n weights = tf.Variable(tf.random_normal([out_size, in_size],\n stddev=1/sqrt(in_size)))\n biases = tf.Variable(tf.zeros([64, 1]))\n return tf.matmul(weights, inputs) + biases",
"def new_shared_array(shape, typecode='d', ismatrix=False):\n typecode = np.dtype(typecode).char\n return multiprocessing.Array(typecode, int(np.prod(shape)), lock=False), tuple(shape), typecode, ismatrix",
"def __init__(self, state_size, action_size, memory_size=40, output_size=11): \n self.memory_size = memory_size\n self.output_size = output_size\n self.action_size = action_size\n self.state_size = state_size\n\n self.output_len = output_size * state_size + (output_size-1) * action_size\n\n # Once filled it will be a list of lists of [x,u] <- both tensors\n self.memory = [[np.zeros(state_size), np.zeros(action_size)]]*memory_size\n\n self.last_state = None\n self.last_augmented = None\n self.last_action = None",
"def identMatrix(size):\n returnvalue = Matrix()\n for i in range(size):\n newrow = [0] * size\n newrow[i] = 1\n returnvalue.addRow(*newrow)\n return returnvalue",
"def make_matrix(sizex, sizey):\n return [[0]*sizey for i in xrange(sizex)]",
"def __init__(self, size):\n _PysparseMatrixFromShape.__init__(self, rows=size, cols=size, bandwidth = 1)\n ids = numerix.arange(size)\n self.put(numerix.ones(size, 'd'), ids, ids)",
"def myWarpPerspectiveSparse(src, H, out_size):\n output = np.zeros(out_size)\n \n # Get all indices from the src matrix\n row, col = np.indices(src.shape[:2])\n \n # Store as x,y,1\n indices = [(c, r, 1) for r, c in zip(row.ravel(), col.ravel())]\n \n for idx in indices:\n new_idx = np.matmul(H, idx)\n new_idx = new_idx / new_idx[2]\n c = int(round(new_idx[0]))\n r = int(round(new_idx[1]))\n output[r, c,0] = src[idx[1], idx[0],0]\n output[r, c,1] = src[idx[1], idx[0],1]\n output[r, c,2] = src[idx[1], idx[0],2]\n \n return np.uint8(output[:out_size[0], :out_size[1]])",
"def make_matrix(sizex, sizey):\n return [[0] * sizey for i in range(sizex)]",
"def createCPUSharedArray(solver,arrayBytes):\n itemsize = int(solver.dtype.itemsize)\n #Creating MPI Window for shared memory\n win = MPI.Win.Allocate_shared(arrayBytes, itemsize, comm=solver.nodeComm)\n sharedBuffer, itemsize = win.Shared_query(0)\n solver.sharedArray = numpy.ndarray(buffer=sharedBuffer, dtype=solver.dtype.type, shape=solver.sharedShape)",
"def fc_layer(input, size_in, size_out):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))\n b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1))\n return tf.nn.relu(tf.matmul(input, w) + b)",
"def __init__(self, in_size, out_size, kernel_size=3, stride=2, padding=1, output_padding=1):\n super().__init__()\n ConvTransBlockList = nn.ModuleList()\n ConvTransBlockList.append(nn.ConvTranspose2d(in_size, out_size,\n kernel_size=kernel_size, stride=stride,\n padding=padding, output_padding=output_padding,\n bias=False)\n )\n ConvTransBlockList.append(nn.InstanceNorm2d(out_size))\n ConvTransBlockList.append(nn.ReLU())\n self.model = nn.Sequential(*ConvTransBlockList)",
"def matrix_to_vector(input):\n input_h, input_w = input.shape\n output_vector = np.zeros(input_h*input_w, dtype=input.dtype)\n # flip the input matrix up-down because last row should go first\n input = np.flipud(input) \n for i,row in enumerate(input):\n st = i*input_w\n nd = st + input_w\n output_vector[st:nd] = row \n return output_vector"
]
| [
"0.5724623",
"0.57070524",
"0.5683393",
"0.55953485",
"0.54363215",
"0.54331064",
"0.53848374",
"0.53823334",
"0.5342572",
"0.5305575",
"0.52464527",
"0.52179205",
"0.51994514",
"0.51989526",
"0.5182485",
"0.5149679",
"0.5126486",
"0.5121966",
"0.5120387",
"0.51178217",
"0.5116339",
"0.5069887",
"0.5019043",
"0.50033486",
"0.49534434",
"0.49089885",
"0.49071935",
"0.49013695",
"0.48877567",
"0.48431218"
]
| 0.74856454 | 0 |
Execute the query in database. con is a SQLite connection and query is a string. Returns a dict with the query, the rows and column's names in the description item. | def execute(con, query):
c = con.cursor()
data = {}
data['query'] = query
data['rows'] = None
data["description"] = None
data['error'] = None
try:
data['rows'] = c.execute(query).fetchall() or None
data["description"] = c.description or None
except Exception as e:
data['error'] = str(e)
return json.dumps(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def execute(query: str):\r\n try:\r\n global connection\r\n global cursor\r\n\r\n cursor.execute(query)\r\n records = cursor.fetchall()\r\n connection.commit()\r\n return records\r\n\r\n except sqlite3.Error as error:\r\n logger.error(f\"Error while connecting to sqlite: {error}\")",
"def _runQuery(self, query, connection):\n with connection as c:\n cursor = c.cursor()\n cursor.execute(query)\n results = []\n result = cursor.fetchone()\n while result is not None:\n numCols = len(result)\n item = {}\n for col in range(0, numCols):\n key = cursor.description[col][0]\n value = result[col]\n try:\n #handle oracle clob datatypes\n value = str(result[col].read())\n except AttributeError:\n pass\n item[key] = value\n #item = {cursor.description[col][0]: result[col] for col in range(0, numCols)}\n results.append(item)\n result = cursor.fetchone()\n return results",
"def query(self, query, *parameters, **kwparameters):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, parameters, kwparameters)\n column_names = [d[0] for d in cursor.description]\n\n if self.grace_result:\n return [GraceDict(zip(column_names, row)) for row in cursor]\n else:\n return [zip(column_names, row) for row in cursor]\n finally:\n cursor.close()",
"def get_query(query):\n global database\n res = database.conn.execute(query)\n out = res.fetchall()\n return [dict(zip(i.keys(), i)) for i in out]",
"def query(self, query, dict_cursor=False):\n try:\n if dict_cursor:\n self.dict_cursor.execute(query)\n results = self.dict_cursor.fetchall()\n else:\n self.cursor.execute(query)\n results = self.cursor.fetchall()\n return results\n except MySQLdb.Error as e:\n self.connection.rollback()\n try:\n print(\"MySQL Error {}: {}\".format(e.args[0], e.args[1]))\n except IndexError:\n print(\"MySQL Error: {}\".format(str(e)))",
"def execute_query(query):\n c.execute(query)\n return c.fetchall()",
"def execute_query(cur, conn, query):\n try:\n cur.execute(query)\n rows = cur.fetchall()\n for row in rows:\n print(row)\n conn.commit()\n except Exception as e:\n print(e)",
"def executeQuery(query):\n c = db.cursor()\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows",
"def query(self, sql):\n\n try:\n results = self.dbase.execute(sql)\n except sqlite3.OperationalError as error:\n return 'sqlite3.OperationalError', error.message\n else:\n return results",
"def execute_query(conn, query):\r\n cur = conn.cursor()\r\n cur.execute(query)\r\n conn.commit()\r\n return cur.fetchall()",
"def execute(self, query):\n with self.conn.cursor() as cur:\n # Execute the query\n try:\n cur.execute(query)\n except Exception as exc:\n print(\"Unable to execute query. Error was {0}\".format(str(exc)))\n exit()\n rows = cur.fetchall()\n return rows",
"def query(self, query):\n\n config = {\n 'user' : self.db_user,\n 'password' : self.db_pwd,\n 'database' : self.db_name,\n 'host' : self.db_host,\n 'unix_socket' : self.db_socket,\n 'port' : self.db_port,\n 'charset' : 'utf8'\n }\n\n try: \n cnx = mysql.connector.connect(**config)\n cursor = cnx.cursor(dictionary=True) # To return rows as dictionaries.\n\n cursor.execute(query[0], query[1])\n \n # Putting the result rows in an array.\n result = []\n for row in cursor:\n result.append(row)\n\n except mysql.connector.Error as err:\n return None\n else:\n cursor.close()\n cnx.close()\n return result",
"def get_data(self, sql_query):\n if self.client.is_closed:\n self.client.connect()\n\n cursor = self.client.connect()\n\n cursor.execute(sql_query)\n\n dict_to_return = self.convert_cursor_to_dict(cursor)\n\n cursor.close()\n self.client.close()\n\n return dict_to_return",
"def query(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n column_names = [d[0] for d in cursor.description]\n return [Row(zip(column_names, row)) for row in cursor]\n finally:\n cursor.close()",
"def db_execute_query(db_connection, query, query_args):\n cursor = db_connection.cursor()\n #datalab_logger_connections.info(\"reading database[Query. May Take Time]...\")\n cursor.execute(query, query_args)\n #datalab_logger_connections.info(\"finish to query database\")\n return cursor",
"def execute_query(cls, query, value, dic=True):\n add_log(log_types[2], \"DBConnection\", \"values : \" + str(value) + \" query : \" + str(query))\n connection = cls.get_connection()\n try:\n cursor = connection.cursor(dictionary=dic)\n except mysql.ProgrammingError:\n add_log(log_types[0], \"DBConnection\", \"Get cursor so create new connection\")\n connection = cls.get_connection(new=True) # Create new connection\n cursor = connection.cursor(dictionary=True)\n try:\n cursor.execute(query, value)\n result = cursor.fetchall()\n except ProgrammingError as err:\n add_log(log_types[0], \"DBConnection\", \"ProgrammingError : \" + str(err))\n result = \"\"\n\n cursor.close()\n return result",
"def run_query(db, query, multi=False):\n result = []\n connection = db.engine.connect()\n rows = connection.execute(text(query))\n for c in rows:\n if multi:\n result.append(dict(c.items()))\n else:\n result.append(dict(c))\n connection.close()\n\n return result",
"def run_query(conn, query):\n\tcur = conn.cursor()\n\tcur.execute(query)\n\trows = cur.fetchall()\n\treturn rows",
"def query(self, statement: str, parameters: Dict = None):\n logger.debug(\n 'query(): statement: %s, parameters: %s', statement, parameters\n )\n cursor = self.conn.cursor()\n if parameters is None:\n parameters = {}\n try:\n cursor.execute(statement, parameters)\n except ValueError as value_error:\n logger.exception(value_error)\n logger.debug(\"Exception with: %s\", statement)\n except sqlite3.Error as exception:\n logger.exception(exception)\n logger.debug(\"Exception with: %s\", statement)\n\n rows = cursor.fetchall()\n\n cursor.close()\n return rows",
"def execute(self, qry):\n def internal():\n print 'qry = ', qry\n self._cur = self.get_cursor()\n print 'self._cur = ', self._cur\n self._cur.execute(qry)\n # self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor).execute(qry)\n rows = self._cur.fetchall()\n return rows\n\n return self._retry(internal)",
"def run_query(cursor, connection, query):\n\n time_start = time.time()\n\n cursor.execute(query)\n\n connection.commit()\n\n print(\"Query:\", end=\" \")\n\n print(query)\n\n try:\n rows = cursor.fetchall()\n\n print(\"Result:\", end=\" \")\n\n for row in rows:\n print(row)\n\n except psycopg2.Error as _:\n print(\"Query did not return any results.\")\n\n print(\"Execution time:\", time.time() - time_start, '\\n')",
"def execute_query(query):\n try:\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n result_table = c.fetchall()\n db.close()\n return result_table\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)",
"def runningwithqueries(query):\n print(\"\\nRunning Query: \" + str(query) + \"\\nResult :\\n\")\n crsr = cnxn.execute(query)\n columns = [column[0] for column in crsr.description]\n print(columns)\n for row in crsr.fetchall():\n print(row)\n crsr.close()",
"def execute(query):\n print query\n cursor.execute(query)",
"def query_database(db, sql_query):\n logging.info(f\"Querying database: {sql_query}\")\n con = connection_for_db(db)\n try:\n cur = con.execute(sql_query)\n dicts = cur.fetchall()\n return dicts\n except Exception as ex:\n db_name = Path(db).name\n raise Exception(\n f'Database `{db_name}` exception.' +\n f'Ensure the DB table exists.\\n{str(ex)}'\n )",
"def query(sql):\n if (sql is None):\n raise Exception(\"SQL not specified\") \n try:\n database = App.instance().environment.database\n connection = psycopg2.connect(host=database.host, dbname=database.database, \n user=database.user, password=database.password)\n cursor = connection.cursor()\n cursor.execute(sql)\n fields = [ x[0] for x in cursor.description]\n return (fields, cursor.fetchall())\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Error connecting to database\", error)\n finally:\n if not connection is None:\n cursor.close()\n connection.close()",
"def run_query(query):\n conn = connection.get_db_connection()\n cursor = conn.cursor()\n cursor.execute(query)\n return cursor",
"def execute_query(self, query):\n with self.db_engine.connect() as conn:\n try:\n result = conn.execute(query)\n\n except Exception as e:\n logger.error(e)\n\n result = None\n \n return result",
"def execute_query(query):\n\n db = psycopg2.connect(database=\"news\")\n cursor = db.cursor()\n cursor.execute(query)\n query_result = cursor.fetchall()\n db.close()\n return query_result",
"def run_query(query):\r\n db = psycopg2.connect('dbname=' + database)\r\n connect = db.cursor()\r\n connect.execute(query)\r\n rows = connect.fetchall()\r\n db.close()\r\n return rows"
]
| [
"0.6735245",
"0.667364",
"0.6655871",
"0.65157926",
"0.64183664",
"0.6412074",
"0.64018434",
"0.6384665",
"0.63658595",
"0.63307995",
"0.63086003",
"0.6293174",
"0.62854195",
"0.62713844",
"0.6236288",
"0.6202163",
"0.6181412",
"0.61524445",
"0.61452717",
"0.6130225",
"0.61295176",
"0.6119763",
"0.61093724",
"0.60892826",
"0.6088044",
"0.6066864",
"0.6052221",
"0.60505193",
"0.6048709",
"0.6034051"
]
| 0.72614986 | 0 |
Will attempt to parse out the title of a movie given a file name that contains the movie title. Most useful for torrented movies that follow a particular naming scheme | def parse_movie_title(file_name):
movie_name = os.path.basename(file_name)
movie_name = parsers.remove_extension(file_name)
movie_name = movie_name.lower()
movie_name = parsers.fix_word_seperators(movie_name)
movie_name = parsers.remove_tags(movie_name)
movie_name = parsers.remove_resolution(movie_name)
movie_name = parsers.remove_keywords(movie_name)
movie_name = parsers.remove_year(movie_name)
movie_name = parsers.remove_trailing_symbols(movie_name)
movie_name = parsers.remove_trailing_crap(movie_name)
movie_name = parsers.fix_the_at_the_end(movie_name)
movie_name = parsers.fix_a_at_the_end(movie_name)
movie_name = parsers.remove_double_spaces(movie_name)
movie_name = movie_name.strip()
movie_name = parsers.recapitalize(movie_name)
return movie_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_manga_title(filename):\n print_info('Attempting to parse manga title from {0}'.format(filename))\n for regex in MANGA_TITLE_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('Series')\n return clean_episode_title(extracted_title)\n return ''",
"def parse_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in EPISODE_TITLE_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''",
"def SongTitle( path ):\n p = subprocess.Popen( ['ffprobe',path], stderr=subprocess.PIPE )\n\n output = p.communicate()[1].decode()\n if 'Invalid data found' in output:\n return None\n\n # find the first occurance of \"title : stuff\" with any number of spaces.\n res = re.search( r'title\\s+:\\s+([a-zA-Z0-9,\\(\\) ]+)', output )\n\n if res is None:\n return \"\"\n\n ret = res.group(1)\n\n return ret",
"def parse_anime_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in ANIME_EPISODE_TITLE_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''",
"def parse_name_movie(soup, pageurl):\n\t# find the summary class header\n\tname_tag = soup.findAll('th', {'class': 'summary'})\n\t# if this header doesn't exist, cannot retrieve name\n\tif len(name_tag) == 0:\n\t\tlogging.warn('' + pageurl + 'does not have a valid name field, parsing terminated')\n\t\treturn None\n\t# return name as a string\n\treturn name_tag[0].get_text()",
"def fetch_title(self, movie_id):\n movie = tmdbsimple.Movies(movie_id)\n request = movie.info()\n\n return movie.title",
"def get_title_name_year(self) -> Tuple[str, str]:\n r = self.session.get(f\"https://www.imdb.com/title/{self.imdb}\")\n if r.status_code != 200:\n raise ValueError(f\"An unexpected error occurred getting IMDB Title Page [{r.status_code}]\")\n imdb_page = html.unescape(r.text)\n imdb_title = re.search(\n # testing ground: https://regex101.com/r/bEoEDn/1\n r\"<title>(?P<name>.+) \\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)\"\n r\"(?P<year>(\\d{4})(|– |–\\d{4})))\\) - IMDb</title>\",\n imdb_page\n )\n if not imdb_title:\n raise ValueError(f\"Could not scrape Movie Title or Year for {self.imdb}...\")\n return imdb_title.group(\"name\").strip(), imdb_title.group(\"year\").strip()",
"def movies_filename(self):\n pass",
"def findByFileName(fileName, get_movie_name_only = False):\r\n MovieName = 'MovieName'\r\n queryresult = None\r\n\r\n movienamequery = IOpenSubtitlesProvider.FormatQuery(\r\n {'query' : fileName})\r\n try:\r\n #send the query\r\n queryresult = IOpenSubtitlesProvider.GetServer().SearchSubtitles(\r\n IOpenSubtitlesProvider.GetToken(), \r\n [movienamequery])\r\n queryresult = queryresult['data']\r\n except Exception as eX:\r\n WriteDebug(eX)\r\n\r\n if queryresult:\r\n is_episode = queryresult[0]['MovieKind'] == 'episode'\r\n if get_movie_name_only: \r\n if is_episode:\r\n res = queryresult[0][MovieName]\r\n queryresult = Utils.getregexresults('\"(.*?)\"', res)[0]\r\n else:\r\n queryresult = queryresult[0][MovieName]\r\n else: queryresult = queryresult\r\n else:\r\n queryresult = None\r\n\r\n return queryresult",
"def get_title(f):\n return os.path.basename(f)",
"def clean_episode_title(filename):\n new_str = filename.replace('_', ' ').replace('-', ' ')\n return re.sub(r'\\s+', ' ', new_str).strip()",
"def crawl_by_title(movie_name, verbose, year=None, parent_pbar=None):\n def _print(msg):\n if verbose:\n if parent_pbar is not None:\n parent_pbar.set_description(msg)\n parent_pbar.refresh()\n sys.stdout.flush()\n tqdm()\n else:\n print(msg)\n\n os.makedirs(_IMDB_DIR_PATH, exist_ok=True)\n file_name = _parse_name_for_file_name(movie_name) + '.json'\n file_path = os.path.join(_IMDB_DIR_PATH, file_name)\n if os.path.isfile(file_path):\n _print('{} already processed'.format(movie_name))\n return _result.EXIST\n\n # _print(\"Extracting a profile for {} from IMDB...\".format(movie_name))\n try:\n props = crawl_movie_profile(movie_name, year)\n # _print(\"Profile extracted succesfully\")\n # _print(\"Saving profile for {} to disk...\".format(movie_name))\n with open(file_path, 'w+') as json_file:\n # json.dump(props, json_file, cls=_RottenJsonEncoder, indent=2)\n json.dump(props, json_file, indent=2)\n _print(\"Done saving a profile for {}.\".format(movie_name))\n return _result.SUCCESS\n except Exception as exc:\n _print(\"Extracting a profile for {} failed\".format(movie_name))\n # traceback.print_exc()\n return _result.FAILURE\n # print(\"Extracting a profile for {} failed with:\".format(movie_name))\n # raise exc",
"def __calculate_title(video_data):\n title = 'Unknown'\n if 'fulltitle' in video_data.keys():\n title = video_data['fulltitle']\n elif 'title' in video_data.keys():\n title = video_data['title']\n elif '_filename' in video_data.keys():\n title = video_data['_filename']\n return title",
"def parse_title(title):\n title = title.split(\" / \")\n return title",
"def getTitle(movieInfo):\n if \"title\" in movieInfo:\n #We remove the punctuation\n title = \"\".join(c for c in movieInfo[\"title\"] if c not in punctuation)\n #We return the title as a list of words in the right format\n return [ _format(w) for w in title.split() ]\n else:\n raise AttributeError(\"%s instance has no attribute title\" % movieInfo)",
"def _title(self, path):\n title = os.path.basename(os.path.splitext(path)[0])\n return title",
"def get_title(line):\n title = line.split(' (')[0]\n return title",
"def get_film_name(film) -> str:\n return film.split('/')[-1]",
"def getFilmByProcessedName(Name):\n try:\n connection = connect()\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"SELECT FilmID, Title FROM `films` WHERE TitlePP = %s\"\"\", (Name))\n\n return cursor.fetchone()\n except Exception as e:\n print(\"Error getting film id by similar name, with name {}\".format(Name), str(e))\n finally:\n connection.close()",
"def parse_title(title, various):\n if various and \" - \" in title:\n title = title.split(\" - \", 1)[1]\n return RE_FEAT.sub(\"\", title).rstrip()",
"def get_title(content):\n content = content[:100000]\n pa = re.compile(\"<title.*?>(.*?)<\\/title>\", re.DOTALL | re.IGNORECASE)\n match = re.search(pa, content)\n title = \"\"\n if match != None:\n title_found = match.group(1)\n title = title_found.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \" \")\n return title",
"def _parse_title(self, item):\n title = item[\"Title\"]\n return title",
"def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname",
"def parse_artist_title(line, match, result):\n n = re.search(\"[a-z][A-Z]\", match.group(2))\n if n is not None:\n index = line.find(n.group(0)) + 1\n result[\"artist\"] = line[:index].replace(match.group(1), \"\")\n result[\"title\"] = line[index:]",
"def _get_video_name(self, fname):\n csv_name_split = fname.split(\"_\")\n thirty_fps_loc = csv_name_split.index(\"30fps\")\n video_name = \"_\".join(csv_name_split[0:thirty_fps_loc+1])\n return video_name",
"def get_movie(movie_name):\n movie_name = movie_name.replace('_', ' ')\n for movie in movies_data:\n if movie.lower() == movie_name.lower():\n return make_response(jsonify(movies_data[movie]), 200)\n\n abort(404)",
"def get_title(self):\n\n if self.title: return self.title\n path = self.get_path()\n if str(path) == \"\": \n Settings.err_print(\"missing file title\")\n return \"\"\n title, ext = os.path.splitext(path)\n self.ext = ext\n self.title = \"{}{}\".format(os.path.basename(title), ext)\n return self.title",
"def get_year_from_movielist_title(title):\n match = re.match(r'.*\\s+\\((\\d+)\\)', title)\n year = int(match.groups()[0])\n return year",
"def test_parse_fasta_title_03():\n seq_name, seq_end = blast.parse_fasta_title(\n 'title1/2 after', 'end_2', '2')\n assert seq_name == 'title1'\n assert seq_end == '2'",
"def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title"
]
| [
"0.7444756",
"0.7063565",
"0.70005333",
"0.68266726",
"0.66763353",
"0.6623276",
"0.6516655",
"0.6300323",
"0.6278855",
"0.62699896",
"0.6266796",
"0.6234666",
"0.62332964",
"0.6214932",
"0.6211282",
"0.6099446",
"0.60892713",
"0.6073653",
"0.6051425",
"0.60418546",
"0.59744674",
"0.596531",
"0.5934437",
"0.59082276",
"0.5901897",
"0.58774835",
"0.58656645",
"0.5860356",
"0.58527476",
"0.5839972"
]
| 0.86128676 | 0 |
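Several of the negatives above reduce to one regex idiom: pulling a 4-digit year out of a 'Title (YYYY)' string. A minimal self-contained sketch of that pattern (the function name is illustrative, not taken from any snippet):

import re

def year_from_title(title):
    # Match a trailing '(YYYY)' suffix, e.g. 'Heat (1995)' -> 1995.
    match = re.match(r'.*\s+\((\d{4})\)', title)
    return int(match.group(1)) if match else None

assert year_from_title('Toy Story (1995)') == 1995
assert year_from_title('Untitled') is None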
Retrieve wall time from cloud-init metadata. Cloud-init saves all passed metadata into a cloudinit.sources.DataSource object under the attribute 'metadata'. A DataSourceOpenStack object stores additional metadata in a subdictionary 'meta'. | def get_times():
try:
with open("/var/lib/cloud/instance/obj.pkl", "r") as file_:
data = pickle.load(file_)
except IOError:
return
meta = data.metadata.get("meta")
if meta is None:
raise EnvironmentError("Wrong virtualization environment.")
keys = [x for x in meta.keys() if re.search(".*Wall.*Time", x, re.IGNORECASE)]
if len(keys) != 1:
if len(keys) == 0:
raise ValueError("No meta-data entry with key 'WallTime'")
else:
raise ValueError("Ambiguous meta-data found: %s" % keys)
walltime = int(meta.get(keys[0]))
starttime = int(os.stat("/var/lib/cloud/instance/obj.pkl").st_ctime)
return walltime, starttime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wall_time(self):",
"def _update_metadata_date(self, properties):\n if \"core\" not in properties:\n properties.core = Struct()\n properties.core.metadata_date = self._backend.server_time_utc()",
"def meta_data(date):\n return {'sourceDate': datetime.strptime(date, '%a, %d %b %Y %H:%M:%S %Z').replace(tzinfo=UTC).isoformat()}",
"def __init__(self, time, metadata):\n self.time = time\n self.metadata = metadata",
"def add_metadata(data):\n data[\"last_downloaded\"] = util.utc_now()\n return data",
"def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata",
"def time(self):\n return self.raw[\"logTime\"]",
"def metadata(self, timestamp=None):\n\n records = {}\n for k in self.file.root.photons.photontable.attrs._f_list('user'):\n data = getattr(self.file.root.photons.photontable.attrs, k)\n try:\n data = data.get(timestamp, preceeding=True)\n records[k] = data\n except AttributeError:\n records[k] = data\n except ValueError:\n pass # no data\n\n return records",
"def get_metadata(self, source, graph):\n return self.server.get_metadata(source, self.graphs.get(graph))",
"def _get_model_metadata(self, cube):\n init_coord = cube.coord(self.forecast_ref_time)\n init_dates = [cube_time_converter(time, init_coord.units) \n for time in set(init_coord.points)]\n \n time_coord = cube.coord(self.time_coord)\n fcst_dates = [cube_time_converter(time, time_coord.units) \n for time in time_coord.points]\n \n area_bounds = self._area_inst.get_cube_area_bounds(cube, \n self.xy_coords)\n x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata = {} \n metadata['VARIABLE'] = cube.name()\n metadata['UNITS'] = str(cube.units)\n metadata['INITIALISATION_DATES'] = init_dates\n metadata['MEMBERS'] = len(cube.coord(self.realization).points)\n metadata['FORECAST_DATES'] = fcst_dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # Find additional coordinates in cube and add them to metadata.\n for coord in cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n return metadata",
"def _get_obs_metadata(self, cube):\n time_coord = cube.coord(self.time_coord)\n dates = [cube_time_converter(time, time_coord.units) \n for time in time_coord.points]\n \n area_bounds = self._area_inst.get_cube_area_bounds(cube, \n self.xy_coords)\n x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata = {} \n metadata['VARIABLE'] = cube.name()\n metadata['UNITS'] = str(cube.units)\n metadata['DATES'] = dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # Find additional coordinates in cube and add them to metadata.\n for coord in cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n return metadata",
"def GetMetadata(self):\n return self.dict['meta']",
"def get_time(self):\n return self.run_command('get_time')[0]",
"def get_time_info(self):\n\n raise NotImplementedError",
"def get_time(self):\n return self.block.create_time",
"def get_metadata():\n\n metadata = []\n current_date = (datetime.date.today(),)\n\n # make sql connection\n # execute query\n with sql_cursor() as cursor:\n try:\n cursor.execute('USE goggles')\n cursor.execute('SELECT b.image_name, b.X_Min, b.Y_Min, b.X_Max, b.Y_Max, '\n 'b.init_vector, b.goggles from BBOX AS b, IMAGE as i where '\n 'b.image_name=i.image_name and i.image_date=%s and b.goggles=False', current_date)\n\n for (image_name, x_min, y_min, x_max, y_max, init_vector, goggles) in cursor:\n metadata.append({'image_name': image_name,\n 'x_min': float(x_min),\n 'y_min': float(y_min),\n 'x_max': float(x_max),\n 'y_max': float(y_max),\n 'init_vector': init_vector\n })\n except Exception as e:\n print(e)\n\n with open(METADATA_FILE, 'w') as meta_file:\n json.dump(metadata, meta_file)\n return metadata",
"def wind_meta(self):\n if self._wind_meta is None:\n with Resource(self.wind_fpath) as res:\n self._wind_meta = res.meta\n return self._wind_meta",
"def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata",
"def wind_meta(self):\n return self.data.wind_meta",
"def _get_metadata(self): \n metadata = {'DATA_TYPE':'Forecast Data'} \n \n cube_metadata = self._get_model_metadata(self.cube)\n \n self.cube_init_dates = cube_metadata['INITIALISATION_DATES']\n del cube_metadata['INITIALISATION_DATES']\n \n self.cube_dates = cube_metadata['FORECAST_DATES']\n del cube_metadata['FORECAST_DATES']\n \n for key, val in cube_metadata.items():\n # Find unique metadata which has not already been added by \n # previous cubes. Years are the common one.\n current_vals = metadata.get(key)\n if current_vals is not None:\n for this_val in current_vals:\n if hasattr(this_val, '__iter__'): \n try: \n if numpy.array_equal(this_val, val):\n break\n except AttributeError:\n # If the array type is not comparable for \n # example array of strings.\n equal = True\n for this_item, item in zip(this_val, val):\n if this_item != item:\n equal = False\n break\n if equal:\n break\n else:\n if this_val == val:\n break\n metadata[key].append(val)\n else:\n metadata[key] = [val]\n \n bound_names = []\n # Tidy up list of length 1.\n for key, val in metadata.items():\n if type(val) == list and len(val) == 1:\n metadata[key] = val[0]\n # Retrieve the exact bound names.\n if key[-7:] == '_BOUNDS':\n bound_names.append(key)\n \n metadata['INITIALISATION_DATES'] = [date.strftime('%d/%m/%Y') \n for date in \n self.cube_init_dates]\n metadata['FORECAST_DATES'] = [date.strftime('%d/%m/%Y') \n for date in self.cube_dates]\n\n return self.MetaData(metadata, bound_names)",
"def get(self):\n\n if self._metadata is None:\n raise ValueError('must call \\'refresh\\' method first')\n\n return self._metadata",
"def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data",
"def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = 
list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)",
"def getConnectionTime(probe,since=None):\n\ttry:\n\t\tresult = {}\n\t\t\t\t\n\t\tconnectionResults = ProbeConnectionResult.objects.filter(test__log__probe = probe).prefetch_related('timecheck_set')\n\t\tif since != None:\n\t\t\tconnectionResults = connectionResults.filter(date__gte=since)\n\n\t\tssids = set(connectionResults.values_list('ssid', flat=True))\n\t\tfor ssid in ssids:\n\t\t\ttmp = ssid.replace(\".\",\"\").replace(\"-\",\"\")\n\t\t\tresult[tmp] = []\n\t\t\tssidResults = connectionResults.filter(ssid=ssid).order_by(\"date\")\n\t\t\tfor con in ssidResults:\n\t\t\t\tif con.timecheck_set.all().exists():\n\t\t\t\t\tresult[tmp].append({\"date\": timezone.localtime(con.date), \"connection\":con, \"times\":con.timecheck_set.all().order_by('step')})\n\n\n\t\treturn result\n\n\texcept:\n\t\treturn {}",
"def reply(self, timestamp):\r\n return MetaInfo(self.target, self.source, timestamp)",
"def get_system_date_and_time(self):\n return self.mycam.devicemgmt.GetSystemDateAndTime()",
"def _get_meas_times_web_service(self, last_meas_time):\n subst = ''\n if self._segment and self._segment_value:\n if self._segment['partition_value_type'] == 'int':\n subst = self._segment_value['value_int']\n elif self._segment['partition_value_type'] == 'varchar':\n subst = self._segment_value['value_varchar']\n data_fetch_command_bind_parameter = self._segment['data_fetch_command_bind_parameter']\n else:\n data_fetch_command_bind_parameter = ''\n subst = ''\n\n #meas_times = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst, 'get_meas_times', None)\n ret_data = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst)\n self._web_service_data = dict()\n meas_times = {'header':'meas_time', 'data': list()}\n for meas_time, meas_data in ret_data.iteritems():\n meas_times['data'].append([meas_time])\n self._web_service_data[meas_time] = meas_data \n \n return meas_times",
"def getTime(self, request, context):\n\t\t\n date = re.split(\"\\s\", datetime.utcnow().strftime(\"%Y %m %d %H %M %S\"))\n\n return droneconnect_pb2.Time(year = int(date[0]), month = int(date[1]), day = int(date[2]), hour = int(date[3]), minute = int(date[4]), second = int(date[5]))",
"def fetch_timestamp(self):\r\n return self.__public_request('GET', '/api/v1/timestamp')",
"def build_metadata(meta):\n\n ret = copy.copy(meta) if meta else dict()\n\n ret['name'] = meta.get('name', '')\n\n if 'index' in meta:\n if isinstance(meta.get('index'), str):\n ret['index'] = Index(meta.get('index'))\n elif isinstance(meta.get('index'), Index):\n ret['index'] = meta.get('index')\n\n ret['utc'] = True\n if 'utc' in meta and isinstance(meta.get('utc'), bool):\n ret['utc'] = meta.get('utc')\n\n return pmap(ret)"
]
| [
"0.5747218",
"0.5261601",
"0.5219078",
"0.518085",
"0.5073197",
"0.5068388",
"0.5050664",
"0.4977583",
"0.49501786",
"0.49469298",
"0.49465525",
"0.49380222",
"0.49379975",
"0.4932889",
"0.49007112",
"0.48968756",
"0.48864484",
"0.48787802",
"0.48739028",
"0.48662192",
"0.48500693",
"0.4819495",
"0.48169422",
"0.48134282",
"0.4793247",
"0.47775364",
"0.47629425",
"0.4762175",
"0.4740547",
"0.4719121"
]
| 0.6368934 | 0 |
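A short usage sketch for get_times() above, assuming the cloud-init pickle exists and the OpenStack metadata carries exactly one 'WallTime'-style key:

times = get_times()
if times is not None:
    walltime, starttime = times
    # walltime comes from the metadata entry, starttime from the pickle's ctime
    print("WALLTIME=%d BOOTTIME=%d" % (walltime, starttime))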
Save wall-time information to the environment variables WALLTIME and BOOTTIME. By default the values are stored in /etc/environment; old WALLTIME/BOOTTIME entries are discarded and the rest preserved. | def save_env(wall_time_=None, start_time_=None, environment_file="/etc/environment"):
if not os.access(environment_file, os.W_OK):
raise EnvironmentError("Can't write to %s" % environment_file)
    with open(environment_file, "r") as file_:
        # keep entries other than WALLTIME/BOOTTIME
content = [entry for entry in file_.readlines() if re.match("(?:WALL|BOOT)TIME", entry, re.IGNORECASE) is None]
if wall_time_ is not None:
content.append("WALLTIME=%d\n" % wall_time_)
if start_time_ is not None:
content.append("BOOTTIME=%d\n" % start_time_)
    with open(environment_file, "w") as file_:
file_.writelines(content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_walltime(self):\n\n walltime = time.time() - self._start_time\n with open(self._walltime_path, 'w') as f:\n f.write(str(walltime) + \"\\n\")",
"def set_walltime(self, walltime: str) -> None:\n if not self.batch:\n raise SmartSimError(\"Not running as batch, cannot set walltime\")\n\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_walltime(walltime)",
"def set_system_time(cls, dispatcher, timestamp): # pragma: no cover\n\n if not cls.is_system_openwrt():\n return\n\n dispatcher.update_all_timers(timestamp - time.time())\n with open(os.devnull, \"w\") as dev_null:\n cls.logger.info(\"Setting system time to %s\",\n datetime.utcfromtimestamp(timestamp).strftime('%Y %b %d %H:%M:%S'))\n try:\n call([\"date\", \"+%s\", \"-s\", \"@\" + str(timestamp)],\n stdout=dev_null)\n except OSError:\n cls.logger.exception(\"Failed to set system time\")",
"def get_times():\n try:\n with open(\"/var/lib/cloud/instance/obj.pkl\", \"r\") as file_:\n data = pickle.load(file_)\n except IOError:\n return\n\n meta = data.metadata.get(\"meta\")\n if meta is None:\n raise EnvironmentError(\"Wrong virtualization environment.\")\n\n keys = [x for x in meta.keys() if re.search(\".*Wall.*Time\", x, re.IGNORECASE)]\n if len(keys) != 1:\n if len(keys) == 0:\n raise ValueError(\"No meta-data entry with key 'WallTime'\")\n else:\n raise ValueError(\"Ambiguous meta-data found: %s\" % keys)\n\n walltime = int(meta.get(keys[0]))\n starttime = int(os.stat(\"/var/lib/cloud/instance/obj.pkl\").st_ctime)\n return walltime, starttime",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show system uptime|display json\",\n \"show version\"], None, 'mixed')",
"def save_current_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\" # hard coding this due to CRON, but will remove later\n output_file = open(\"last_time_run\", \"w\")\n current_time_string = datetime.datetime.strftime(\n datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"\n )\n output_file.write(current_time_string)\n print(current_time_string)\n output_file.close()",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"cat /proc/uptime\", \"hostnamectl\",\n \"cat /etc/os-release\"], None, 'text')",
"def set_ntp_sysctl(self):\n print \"Modification du sysctl\"\n self.exec_cmd(\"echo \\\"xen.independent_wallclock = 1\\\" >> %s/etc/sysctl.conf\" % self.rep_vhosts_vm)",
"def getSystemAwake(self):\n print 'start of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n try:\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n if self.db['system_awake'] == False:\n print 'start of if true - getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n self.system_awake = self.db['system_awake']\n self.db.close()\n else:\n self.system_awake = True\n self.db['system_awake'] = self.system_awake\n self.db.close()\n \n print 'End of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n \n except Exception, e:\n self.log_file.logEntry('{0}\\nUnable to load previous system_awake value, setting value to True'.format(e))\n self.system_awake = True",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"cat /proc/uptime\", \"hostname\", \"show version\"], None, 'text')",
"def boot_time():\n # This dirty hack is to adjust the precision of the returned\n # value which may have a 1 second fluctuation, see:\n # https://github.com/giampaolo/psutil/issues/1007\n global _last_btime\n ret = float(cext.boot_time())\n if abs(ret - _last_btime) <= 1:\n return _last_btime\n else:\n _last_btime = ret\n return ret",
"def last_boot(self, value: datetime) -> None:\n self._data[ATTR_LAST_BOOT] = value.isoformat()",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show version|json\", \"show hostname\"], None,\n 'mixed')",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show version\", \"show run hostname\"], None)",
"def getruntime():\n global starttime, last_uptime, last_timestamp, elapsedtime, granularity, runtimelock\n \n # Get the lock\n runtimelock.acquire()\n \n # Check if Linux or BSD/Mac\n if ostype in [\"Linux\", \"Darwin\"]:\n uptime = os_api.get_system_uptime()\n\n # Check if time is going backward\n if uptime < last_uptime:\n # If the difference is less than 1 second, that is okay, since\n # The boot time is only precise to 1 second\n if (last_uptime - uptime) > 1:\n raise EnvironmentError, \"Uptime is going backwards!\"\n else:\n # Use the last uptime\n uptime = last_uptime\n \n # No change in uptime\n diff_uptime = 0\n else: \n # Current uptime, minus the last uptime\n diff_uptime = uptime - last_uptime\n \n # Update last uptime\n last_uptime = uptime\n\n # Check for windows \n elif ostype in [\"Windows\"]: \n # Release the lock\n runtimelock.release()\n \n # Time.clock returns elapsedtime since the first call to it, so this works for us\n return time.clock()\n \n # Who knows... \n else:\n raise EnvironmentError, \"Unsupported Platform!\"\n \n # Current uptime minus start time\n runtime = uptime - starttime\n \n # Get runtime from time.time\n current_time = time.time()\n \n # Current time, minus the last time\n diff_time = current_time - last_timestamp\n \n # Update the last_timestamp\n last_timestamp = current_time\n \n # Is time going backward?\n if diff_time < 0.0:\n # Add in the change in uptime\n elapsedtime += diff_uptime\n \n # Lets check if time.time is too skewed\n else:\n skew = abs(elapsedtime + diff_time - runtime)\n \n # If the skew is too great, use uptime instead of time.time()\n if skew < granularity:\n elapsedtime += diff_time\n else:\n elapsedtime += diff_uptime\n \n # Release the lock\n runtimelock.release()\n \n # Return the new elapsedtime\n return elapsedtime",
"def _get_boot_time():\r\n f = open('/proc/stat', 'r')\r\n try:\r\n for line in f:\r\n if line.startswith('btime'):\r\n return float(line.strip().split()[1])\r\n raise RuntimeError(\"line not found\")\r\n finally:\r\n f.close()",
"def wall_time(self):",
"def get_system_date_and_time(self):\n return self.mycam.devicemgmt.GetSystemDateAndTime()",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show version\"], None)",
"def setWakeUpByRTC(pWkeUpTimeEpoch):\n res = False\n # first check that we can access rtc\n if os.path.isfile(cons.TK_CTRL_WKUPF):\n try:\n # now write wakeup timer\n with open(cons.TK_CTRL_WKUPF, \"w\") as wakeFile:\n # write time\n wakeFile.write(str(pWkeUpTimeEpoch))\n # success\n res = True\n except:\n # we only care about this, at least for now, if it succeeds\n res = False\n # result\n return res",
"def set_clock():\n import package\n package.install(\"ntpdate\")\n sudo(\"ntpdate 0.fi.pool.ntp.org 1.fi.pool.ntp.org 2.fi.pool.ntp.org\")",
"def to_systime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n micro = int(dt_obj.microsecond / 1000)\n full_date = dt_obj.strftime('%Y, %m, %w, %d, %H, %M, %S, ' + str(micro))\n stamp = []\n if sys.version_info >= (3, 0):\n for value in full_date.split(','):\n stamp.append(hexlify(struct.pack('<H', int(value))).decode('utf8'))\n elif sys.version_info < (3, 0):\n for value in full_date.split(','):\n stamp.append(hexlify(struct.pack('<H', int(value))))\n self.out_systemtime = ''.join(stamp)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_systemtime = False\n return self.out_systemtime",
"def make_time_stamp_file():\n with open(TIME_STAMP_FILE_NAME, 'w') as f:\n f.write(datetime.datetime.now().strftime('%m/%d/%Y %I:%M%p'))",
"def from_systime(self):\n try:\n to_le = str(hexlify(unhexlify(systime)[::-1])).strip(\"b'\").strip(\"'\")\n converted = [to_le[i:i + 4] for i in range(0, len(to_le), 4)][::-1]\n stamp = []\n for i in converted:\n dec = int(i, 16)\n stamp.append(dec)\n dt_obj = dt(stamp[0], stamp[1], stamp[3], stamp[4], stamp[5], stamp[6], stamp[7]*1000)\n self.in_systemtime = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_systemtime = False\n return self.in_systemtime",
"def last_boot(self) -> datetime:\n boot_str = self._data.get(ATTR_LAST_BOOT, DEFAULT_BOOT_TIME)\n\n boot_time = parse_datetime(boot_str)\n if not boot_time:\n return datetime.utcfromtimestamp(1)\n return boot_time",
"def sleepTimeManager(self):\n days = {0:'monday', 1:'tuesday', 2:'wednesday', 3:'thursday', 4:'friday', 5:'saturday', 6:'sunday'}\n sleep_active = self.str2bool(self.xml_conf.find('active'))\n \n print 'sleep_active = {0}, is of type = {1}'.format(sleep_active, type(sleep_active)) #TESTING ++++++++++++++++++++++++++++++++++++\n \n start_time = str(self.xml_conf.find_attrib(days[datetime.datetime.today().weekday()], 'start'))\n stop_time = str(self.xml_conf.find_attrib(days[datetime.datetime.today().weekday()], 'stop'))\n \n print 'start_time = {0}, is of type = {1}'.format(start_time, type(start_time)) #TESTING ++++++++++++++++++++++++++++++++++++\n print 'stop_time = {0}, is of type = {1}'.format(stop_time, type(stop_time)) #TESTING ++++++++++++++++++++++++++++++++++++\n \n return sleep_active, start_time, stop_time",
"def system_time(self, system_time):\n\n self._system_time = system_time",
"def GetMonotime():\n return float(open(PROC_UPTIME).read().split()[0])",
"def configure_wwhr(dwelling):\n if dwelling.get('wwhr_systems'):\n for sys in dwelling.wwhr_systems:\n sys['pcdf_sys'] = get_wwhr_system(sys['pcdf_id'])",
"def formatWallTime(wallt):\n d=float(wallt)\n h=24*d ; m=(h-int(h))*60 ; s=(m-int(m))*60\n h=int(h) ; m=int(m) ; s=int(s)\n return '%d:%02d:%02d'%(h,m,s)"
]
| [
"0.7262268",
"0.6065319",
"0.59060913",
"0.58288807",
"0.56810534",
"0.5605353",
"0.5567082",
"0.5476224",
"0.5462885",
"0.545563",
"0.54304385",
"0.5336938",
"0.53221285",
"0.5315285",
"0.52749306",
"0.52722496",
"0.5256955",
"0.51557994",
"0.51556337",
"0.51247483",
"0.508376",
"0.50742817",
"0.5017057",
"0.5001419",
"0.49600235",
"0.4932302",
"0.49242198",
"0.49183857",
"0.48925328",
"0.48876065"
]
| 0.7155372 | 1 |
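A hedged sketch chaining save_env() with get_times() from the previous record; writing /etc/environment requires root:

times = get_times()
if times is not None:
    walltime, starttime = times
    save_env(wall_time_=walltime, start_time_=starttime)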
recs is an array of Preverb objects | def write_preverbs(recs, fileout):
fout = codecs.open(fileout,'w')
n = 0
nadj=0
for rec in recs:
L = rec.L # headword record number
hw = rec.hw # the headword
pfx = rec.pfx # the preverb prefixes
pfxhw = rec.pfxhw
linenum = rec.linenum
out = "%s:%s:%s:%s:%s" %(L,hw,pfx,pfxhw,linenum)
fout.write(out + '\n')
n = n + 1
dumb_pfxhw = pfx + hw
if dumb_pfxhw != pfxhw:
nadj = nadj+1
outadj = "ADJUST %03d: %s:%s:%s:%s (dumb=%s)" %(nadj,L,hw,pfx,pfxhw,dumb_pfxhw)
try:
#print outadj.encode('utf-8')
pass
            except:
print "ERROR PRINTING for line=",n,rec.line
fout.close()
print n,"records written to",fileout
print nadj,"prefixed verbs required sandhi adjustments" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_rec(filename):\n tree = et.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,\n int(bbox.find('ymin').text) - 1,\n int(bbox.find('xmax').text) - 1,\n int(bbox.find('ymax').text) - 1]\n objects.append(obj_struct)\n\n return objects",
"def add(self, rec):\n\n q = rec.split()\n\n self.inds.append(int(q[0]))\n self.time.append(float(q[2]))\n self.tau.append(float(q[4]))\n self.rt['code'].append(int(q[6]))\n self.rt['str'].append(q[7])\n for i in range(3):\n self.n[i]['val'].append(int(q[9+i]))\n for i, k in enumerate(self.m.keys()):\n self.m[k].append(int(q[13+2*i]))\n self.mtm.append(int(q[21]))\n self.mtn.append(int(q[23]))\n self.cln.append(int(q[25]))\n self.score['fission']['num'].append(int(q[28]))\n self.score['fission']['val'].append(float(q[29]))\n self.score['fusion11']['num'].append(int(q[31]))\n self.score['fusion11']['val'].append(float(q[32]))\n self.score['fusion12']['num'].append(int(q[34]))\n self.score['fusion12']['val'].append(float(q[35]))\n self.score['fusion1L']['num'].append(int(q[37]))\n self.score['fusion1L']['val'].append(float(q[38]))",
"def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n 
y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats",
"def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)",
"def voc_ap(rec, prec):\n rec.insert(0, 0.0) # insert 0.0 at begining of list\n rec.append(1.0) # insert 1.0 at end of list\n mrec = rec[:]\n prec.insert(0, 0.0) # insert 0.0 at begining of list\n prec.append(0.0) # insert 0.0 at end of list\n mpre = prec[:]\n \"\"\"\n This part makes the precision monotonically decreasing\n (goes from the end to the beginning)\n \"\"\"\n # matlab indexes start in 1 but python in 0, so I have to do:\n # range(start=(len(mpre) - 2), end=0, step=-1)\n # also the python function range excludes the end, resulting in:\n # range(start=(len(mpre) - 2), end=-1, step=-1)\n for i in range(len(mpre) - 2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i + 1])\n \"\"\"\n This part creates a list of indexes where the recall changes\n \"\"\"\n # matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;\n i_list = []\n for i in range(1, len(mrec)):\n if mrec[i] != mrec[i - 1]:\n i_list.append(i) # if it was matlab would be i + 1\n \"\"\"\n The Average Precision (AP) is the area under the curve\n (numerical integration)\n \"\"\"\n # matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n ap = 0.0\n for i in i_list:\n ap += ((mrec[i] - mrec[i - 1]) * mpre[i])\n return ap, mrec, mpre",
"def dict_of_recs_for_run (ins, exp, runnum) :\n return calibration_runs(ins, exp)[runnum]",
"def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences",
"def filter(self, rec):\n\t\tans = []\n\t\tfor line in rec:\n\t\t\t# print 25*'-'\n\t\t\tif 'rr' in line:\n\t\t\t\t# print 'rr: ',line['rr']\n\t\t\t\t# print 'mdns'\n\t\t\t\ta = self.rr(line)\n\n\t\t\t\tif a:\n\t\t\t\t\ta['type'] = 'rr'\n\t\t\t\t\tans.append(a)\n\t\t\telif 'type' in line:\n\t\t\t\t# print line['type']\n\t\t\t\trtype = line['type']\n\t\t\t\t# if type == 'ptr': print 'ptr'\n\t\t\t\t# elif type == 'txt': print 'txt'\n\t\t\t\tif rtype == 'aaaa': ans.append(line)\n\t\t\t\telif rtype == 'a': ans.append(line)\n\t\t\t\telif rtype == 'arp': ans.append(line)\n\t\t\t\telse: print('<<<<', line, '>>>>>>>')\n\t\t\telse:\n\t\t\t\tprint('******', line, '*******')\n\t\treturn ans",
"def _parse_records(self, customization=None):\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n\n records = []\n record = \"\"\n # read each line, bundle them up until they form an object, then send for parsing\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n # Remove leading whitespaces\n line = line.lstrip()\n logger.debug('Line starts with @')\n # Parse previous record\n _add_parsed_record(record, records)\n # Start new record\n logger.debug('The record is set to empty')\n record = \"\"\n # Keep adding lines to the record\n record += line\n\n # catch any remaining record and send it for parsing\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records",
"def records(self):\r\n raise NotImplementedError()",
"def __init__(self):\n \"\"\"\n This be a list of posts, which will let us see exactly what the person was exposed\n to without having to go through an outside list\n \"\"\"\n self.read_posts = []\n \"\"\"\n This will be a numpy array of values that \n \"\"\"\n self.read_posts_indices = np.ones(0)\n self.all_posts = []",
"def __init__(self):\n self.movie_reviews = []",
"def __init__(self, *records: ScalarSequence):\n self._records = [r for r in records if r]",
"def parse_rec(json_dataset, index):\n info = voc_info(json_dataset)\n data_path = info['data_path']\n image_file = os.path.join(data_path, 'images', index + '.jpg')\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\n\n height, width = cv2.imread(image_file).shape[:2]\n annopath = os.path.join(data_path, 'annotations', '{:s}.txt')\n filename = annopath.format(index)\n rotate = 0\n objects = []\n with open(filename) as f:\n line = f.readline()\n while line:\n parts = line.split()\n if parts[0] == 'rotate':\n rotate = int(parts[1])\n assert rotate == 0\n else:\n obj_struct = {'name': parts[0]}\n x1 = min(max(int(parts[1]), 0), width - 1)\n y1 = min(max(int(parts[2]), 0), height - 1)\n x2 = min(max(int(parts[3]), 0), width - 1)\n y2 = min(max(int(parts[4]), 0), height - 1)\n obj_struct['bbox'] = [x1, y1, x2, y2]\n obj_struct['truncated'] = int(parts[5])\n obj_struct['difficult'] = 0\n objects.append(obj_struct)\n line = f.readline()\n\n return objects",
"def REC_2s():\n return 2",
"def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]",
"def process_preds(preds, lines, lines_bal, verbose=True):\n assert len(lines) + len(lines_bal) == len(preds), \"Total number of lines does not match number of predictions!\"\n\n nspec, nboxes = preds[0].shape\n nboxes //= 2\n if verbose:\n print(f\"INFO: nspec = {nspec}, nboxes={nboxes}\")\n nlines = len(lines)\n\n # Doing non BAL lines first\n c_line = np.zeros((nlines, nspec))\n z_line = np.zeros_like(c_line) # This ensures they're always the same shape.\n i_to_wave = lambda x: np.interp(x, np.arange(len(wave)), wave)\n\n for il, line in enumerate(lines):\n l = absorber_IGM[line]\n\n # j is the box number, offset is how far into the box the line is predicted to be\n j = preds[il][:, :13].argmax(axis=1)\n offset = preds[il][np.arange(nspec, dtype=int), nboxes + j]\n\n # Confidence in line, redshift of line\n c_line[il] = preds[il][:, :13].max(axis=1)\n z_line[il] = (i_to_wave((j + offset) * len(wave) / nboxes) / l) - 1\n\n # Not \"best redshift\", rather \"redshift of most confident line\"\n zbest = z_line[c_line.argmax(axis=0), np.arange(nspec)]\n zbest = np.array(zbest)\n\n # Code for BAL boxes is the same as above just run on the BAL lines.\n nlines_bal = len(lines_bal)\n c_line_bal = np.zeros((nlines_bal, nspec))\n z_line_bal = np.zeros_like(c_line_bal)\n\n for il, line in enumerate(lines_bal):\n l = absorber_IGM[line]\n\n j = preds[nlines+il][:, :13].argmax(axis=1)\n offset = preds[il+nlines][np.arange(nspec, dtype=int), nboxes + j]\n\n c_line_bal[il] = preds[il + nlines][:, :13].max(axis=1)\n z_line_bal[il] = (i_to_wave((j + offset) * len(wave) / nboxes) / l) - 1\n\n return c_line, z_line, zbest, c_line_bal, z_line_bal",
"def _parse(self, array):\n return [self._parse_note(x) for x in array]",
"def parse_fastq (rec_lines):\n data = []\n data.append(rec_lines[0][1:])\n data.append(rec_lines[1])\n data.append(rec_lines[3])\n return data",
"def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)",
"def predict(self, docs):\n \n tf_idf_vecs = self.tfidf.transform(docs)\n \n y_pred = np.zeros((tf_idf_vecs.shape[0], len(self.gbms)))\n \n for i,gbm in enumerate(self.gbms):\n y_pred[:,i] = gbm.predict(tf_idf_vecs)\n \n return y_pred",
"def Train(self):\n\n lem = lemmatization()\n # Get Mongo client\n client = MongoClient()\n db = client['IR']\n collection = db['Movies']\n print(\"collection: \", collection)\n host = '127.0.0.1' # or localhost\n port = 27017\n client = MongoClient(host, port)\n # # 创建数据库dialog\n db = client['allMovies']\n # # 创建集合scene\n collection = db[\"Movie\"]\n print(collection.__sizeof__())\n print(collection.find_one({\"content.genres.name\": \"Drama\"}))\n\n # Path to folder to store trained data set\n path = self.path\n\n query_results = []\n for i in (collection.find({\"name\": \"183.txt\"})):\n query_results.append(i)\n print(\"queryyy\", query_results)\n\n # Dictionary to store the terms appearing in the genres\n dictionary = []\n\n # List to store category of each record\n categories = []\n\n training_data = []\n # Document ids of records to be trained\n doc_ids = []\n a = 0\n i=0\n movie=query_results[0]\n tsv_file = open(\n \"/home/do/PycharmProjects/pythonProject/information-retrival-search-engine/informationRetrival/classification/test_data.tsv\")\n read_tsv = csv.reader(tsv_file, delimiter=\"\\t\")\n for row in read_tsv:\n training_data.append(row[1])\n categories.append(row[2])\n dict_rec = row[1].lower()\n # table = maketrans(string.punctuation, \" \")\n for s in string.punctuation:\n dict_rec = dict_rec.replace(s, \"\")\n # dict_rec = str(dict_rec).translate(string.punctuation)\n dict_rec = lem.removeStopWords(dict_rec.split(\" \"))\n\n # Add to dictionary\n if dict_rec not in dictionary:\n dictionary.extend(dict_rec)\n\n # print(row[2])\n # while i<=99:\n #\n # training_data.append(movie['content'][i]['overview'])\n # doc_ids.append(movie['_id'])\n # # for genre in movie['content'][i]['genres']:\n # # print(\"genre \", genre['name'])\n # # a = a + 1\n # #\n # # if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or genre[\n # # 'name'] == 'Comedy') and a <= 160:\n # # categories.append(genre['name'])\n #\n # # Convert to lower case and remove stop words from overview\n # dict_rec = movie['content'][i]['overview'].lower()\n # # table = maketrans(string.punctuation, \" \")\n # for s in string.punctuation:\n # dict_rec = dict_rec.replace(s, \"\")\n # # dict_rec = str(dict_rec).translate(string.punctuation)\n # dict_rec = lem.removeStopWords(dict_rec.split(\" \"))\n #\n # # Add to dictionary\n # if dict_rec not in dictionary:\n # dictionary.extend(dict_rec)\n # i=i+1\n print(\"Dictionary\", dictionary)\n print(\"shape\", len(dictionary))\n dictionary = filter(None, list(set(dictionary)))\n\n # Store dictionary in a file\n joblib.dump(dictionary, path + \"_Genre_Dictionary\")\n\n # Store doc ids of trained data in a file\n myfile = open(r'doc_ids.pkl', 'wb')\n #pickle.dump(doc_ids, myfile)\n #myfile.close()\n\n # Initialize training models\n mod_1 = SVC(kernel='linear', C=1, gamma=1)\n mod_2 = LogisticRegression()\n mod_3 = GaussianNB()\n mod_4 = MultinomialNB()\n mod_5 = BernoulliNB()\n\n # Ensemble classifiers\n mod_6 = RandomForestClassifier(n_estimators=50)\n mod_7 = BaggingClassifier(mod_2, n_estimators=50)\n mod_8 = GradientBoostingClassifier(loss='deviance', n_estimators=100)\n\n mod_9 = VotingClassifier(\n estimators=[(\"SVM\", mod_1), (\"LR\", mod_2), (\"Gauss\", mod_3), (\"Multinom\", mod_4), (\"Bernoulli\", mod_5),\n (\"RandomForest\", mod_6), (\"Bagging\", mod_7), (\"GB\", mod_8)], voting='hard')\n mod_10 = VotingClassifier(\n estimators=[(\"SVM\", mod_1), (\"LR\", mod_2), (\"Multinom\", mod_4), (\"Bernoulli\", mod_5), (\"Bagging\", 
mod_7)],\n voting='hard', weights=[1, 2, 3, 2, 1])\n\n # Vectorizers for feature extraction\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n\n vec_list = [vec_1, vec_2]\n vec_list = [vec_1]\n # List of training models\n model_list = [mod_1, mod_2, mod_3, mod_4, mod_5, mod_6, mod_7, mod_8, mod_9, mod_10]\n\n models_used = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vec_used = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n print(\"Starting training. This might take a while...\")\n b = 1\n # Start training\n for model in range(0, len(model_list)):\n a = 1\n for vec in range(0, len(vec_list)):\n mod = model_list[model]\n vector = vec_list[vec]\n print(\"tour\", a, b)\n print(\"taille training : \", (np.shape(training_data)))\n print(training_data)\n print(vector)\n # print(\"fit_tarnsform\", vector.fit_transform(training_data))\n X = vector.fit_transform(training_data).toarray()\n print(\"la matrice x\",1 in X)\n print(\"shape X\", np.shape(X))\n print(np.shape(categories))\n # categories.reshape((80, 2))\n # l=[]\n # l.append([categories[0:79],categories[79:,159]])\n # print(l)\n print(\"categories\", categories)\n\n print(np.unique(categories))\n print(np.unique(X))\n mod.fit(X, categories)\n print(\"fiit\", mod.fit(X, categories))\n\n # Store in a file\n joblib.dump(mod, path + models_used[model] + \"_\" + vec_used[vec] + \".pkl\")\n\n print(models_used[model] + \" \" + vec_used[vec] + \" finished!\")\n a = a + 1\n b = b + 1\n break\n print(\"All Done!!\")",
"def get_rec(self):\n\n #to address cold start problem: checks if user activity is above 5 or so lessons\n # if yes returns recs based on user2user_similarity\n # else returns recs based on item2item_similarity\n pass",
"def test_pipeline_data(self, doc_list):\n summaries = []\n for item in doc_list:\n summaries.append(self.preprocess(item))\n return summaries",
"def records(self):\n return self._records",
"def _read_recs(basedir):\n for borotag in boro_tags:\n datafile = \"%s/%s.csv\" % (basedir,borotag)\n print(\"slurp '%s' ..\" % datafile)\n recs = read_recs(datafile)\n yield from (pluto.parse.normalize(r) for r in recs)",
"def scoreIndPosts(fileName,scoredRecords,outdir='scoreFiles'):\n fOut = codecs.open(outdir+\"/\"+fileName, encoding=\"utf-8\",mode=\"wb\")\n for term in scoredRecords:\n record = term[0][1]\n score = term[1]\n buffer = [record.lat,record.lon,record.text,score,record.user,record.dt,record.img]\n buffer = map(lambda x: x if type(x) == unicode else unicode(x),buffer) #convert floats / ints to unicode for writing\n buffer = map(lambda x: x.replace(u'\\n', u''),buffer)\n fOut.write(u'\\t'.join(buffer) + u'\\n')\n fOut.close()",
"def predict(self, documents):\n raise NotImplementedError()",
"def __iter__(self):\n\t\tfields = 'fieldname', 'text', 'docfreq', 'indexfreq'\n\t\tcur = self.index.collection.find(fields=fields).sort('fieldname')\n\t\treturn (tuple(rec[field] for field in fields) for rec in cur)",
"def __init__(self):\n self.data = []\n self.record = {}"
]
| [
"0.5232923",
"0.5217052",
"0.51894987",
"0.51644367",
"0.5116569",
"0.5047526",
"0.50373435",
"0.50315493",
"0.50176704",
"0.49668643",
"0.48515674",
"0.48328376",
"0.48281032",
"0.48136282",
"0.4806055",
"0.48041537",
"0.47668582",
"0.4759249",
"0.47503585",
"0.47404984",
"0.47384173",
"0.47350553",
"0.4729474",
"0.47229695",
"0.47180858",
"0.47025973",
"0.4675715",
"0.46748984",
"0.4664045",
"0.46520463"
]
| 0.5888206 | 0 |
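write_preverbs() touches only six attributes per record, so for illustration a namedtuple can stand in for the Preverb class (which the source does not show):

from collections import namedtuple

# Hypothetical stand-in; only the attributes write_preverbs() reads.
Preverb = namedtuple('Preverb', 'L hw pfx pfxhw linenum line')

recs = [Preverb(L=1, hw=u'gam', pfx=u'anu', pfxhw=u'anugam', linenum=10,
                line=u'1:gam:anu:anugam:10')]
write_preverbs(recs, 'preverbs.txt')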
Yield sample analysis results fetched from the server. | def get_analysis_results(self, cache=True):
if cache and self._get_result_cache:
for ar in self._get_result_cache:
yield ar
return
url = f'sample_ars?sample_id={self.uuid}'
result = self.knex.get(url)
for result_blob in result['results']:
result = self.analysis_result(result_blob['module_name'])
result.load_blob(result_blob)
# We just fetched from the server so we change the RemoteObject
# meta properties to reflect that
result._already_fetched = True
result._modified = False
if cache:
self._get_result_cache.append(result)
else:
yield result
if cache:
for ar in self._get_result_cache:
yield ar | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()",
"async def _samples(cls, responses: SourceResponses) -> AsyncIterator[Samples]:\n rows = await cls.__parse_csv(responses)\n samples = [row for row in rows if not row[\"responseMessage\"].startswith(\"Number of samples in transaction\")]\n labels = {sample[\"label\"] for sample in samples}\n for label in sorted(labels):\n yield [sample for sample in samples if sample[\"label\"] == label]",
"def run_sample(self):\n # there will be validation failures for sample data\n self.validate_req(ignore_failure=True)\n runner_fn = self.model_runner.execute_model_for_sample_data\n return self.do_handle_request(runner_fn)",
"async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])",
"def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def samples(self):\n pass",
"def __iter__(self):\n for sample in self.data:\n yield sample",
"def sample(self):\n timestamp = time.time()\n try:\n res = requests.get(self.url)\n except requests.exceptions.ConnectionError as error:\n LOG.warning(\"%s %s\", self, error)\n return\n if 199 < res.status_code < 300:\n self.data.append((timestamp, res.json()))\n LOG.debug(\"%s appended data sample\", self)\n else:\n LOG.warning(\"Error %s loading data from %s\", res.status_code, self)\n self.data = self.data[-self.max_samples:]",
"def sample(self):\n logger.info(\"%s: collect sensor data\", self.__class__.__name__)\n samples = []\n self._fetch_data(samples)\n return samples",
"def __iter__(self):\n for sample in self.samples:\n yield sample",
"def getResults():",
"def __iter__(self) :\n for s in self._samples_to_cache :\n yield s",
"async def stream_result_files(self) -> AsyncGenerator[StreamInfoUrl, None]:\n for dataset in self.datasets:\n async for file in dataset.get_data_rootfiles_stream(self.query.value()):\n yield file",
"def get_paginated_biosamples(self):\n more = True\n next_link = None\n while more:\n self.echo_debug(\"Get BaseSpace Biosamples page\")\n resp = self.get_biosamples(next_link)\n\n yield resp.results\n next_link = resp.meta.next\n more = next_link is not None",
"def __iter__(self):\n with self.handler as handler:\n if self.shuffle:\n # load all samples into memory\n samples = []\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n samples.append(sample)\n random.shuffle(samples)\n for sample in samples:\n yield sample\n else:\n # lazy-loading mode\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n yield sample",
"def _fetch_data(self, samples):\n pass",
"def run(self):\n results = self.fetch()\n return results",
"def sampler(self, *args, **kwargs):\n\n return (samples_subgraphs ** 2).tolist()",
"def get_samples(self) -> McmcPtResult:",
"def get_test_results(self, action_instance_id, filters=None):\n yield",
"def get_samples():\n r = req('GET', SUB_API + 'samples', params=handle_filters())\n samples = []\n for k in demisto.get(r.json(), 'data.items'):\n samples.append(sample_to_readable(k))\n md = tableToMarkdown('ThreatGrid - List of Samples', samples, [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': samples},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })",
"def list_samples(arn=None, nextToken=None):\n pass",
"async def test_successful_samples(self):\n self.set_source_parameter(\"test_result\", [\"success\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"242\", entities=[])",
"def list(self, request):\n urls = {\n 'msg': 'Must use bulk_by_sample to get SCCmec Primer hits',\n }\n\n return Response(urls)",
"def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n progress_bar = tqdm(range(nsamples))\n self._set_meta_features()\n task_dict = {}\n finished_tasks = 0\n for _ in range(min(nsamples,self.numworkers)): \n self._prepare_and_start_task(task_dict,seed_iter)\n while finished_tasks < nsamples: \n done_ids, pending_ids = ray.wait(list(task_dict.keys()))\n if done_ids:\n id = done_ids[0]\n finished_tasks += 1\n try:\n data, times, pid = ray.get(id)\n except Exception as exception:\n self.logger.info(\"task with id %s failed with Traceback:\" %task_dict[id], exc_info=True)\n raise exception\n times[-1] = time() # add getter time\n data['idx'] = task_dict.pop(id)\n self.logger.info('id %i on pid %i: finished task.' %(data['idx'],pid))\n self._log_execution_time(data['idx'], times, pid)\n if (nsamples - self._idx) > 0: # directly _schedule next task\n self._prepare_and_start_task(task_dict,seed_iter)\n progress_bar.update()\n yield data",
"def iter_spectra(self):\n for record in self.session.query(SpectrumLibraryIndexRecord).order_by(\n SpectrumLibraryIndexRecord.number).yield_per(10000):\n yield record",
"async def stream_result_file_urls(self) -> AsyncGenerator[StreamInfoUrl, None]:\n for dataset in self.datasets:\n async for file in dataset.get_data_rootfiles_url_stream(self.query.value()):\n yield file",
"def scan_uploaded_samples() -> dict:\n while Analyzer.scanning:\n # Retrieve the scan results\n scan_results = Scanner.get_scans(ids=scan_id)\n try:\n if scan_results[\"body\"][\"resources\"][0][\"status\"] == \"done\":\n # Scan is complete, retrieve our results\n results = scan_results[\"body\"][\"resources\"][0][\"samples\"]\n # and break out of the loop\n Analyzer.scanning = False\n else:\n # Not done yet, sleep for a bit\n time.sleep(Config.scan_delay)\n except IndexError:\n # Results aren't populated yet, skip\n pass\n\n return results",
"def getTestResults():",
"def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)"
]
| [
"0.6731637",
"0.6690499",
"0.66678214",
"0.6459945",
"0.6429607",
"0.64113545",
"0.6407596",
"0.6363508",
"0.6355756",
"0.6331724",
"0.6320033",
"0.6299205",
"0.62114656",
"0.6101482",
"0.6099996",
"0.6096582",
"0.60882914",
"0.6019894",
"0.59304935",
"0.59299564",
"0.58679205",
"0.5858832",
"0.58495957",
"0.58367276",
"0.5826894",
"0.58197224",
"0.58097565",
"0.5808483",
"0.5804412",
"0.578556"
]
| 0.73539895 | 0 |
Return a manifest for this sample. | def get_manifest(self):
url = f'samples/{self.uuid}/manifest'
return self.knex.get(url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})",
"def get_manifest(self):\n return self.manifest",
"def manifest(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")",
"def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")",
"def build_manifest(self):\n return self._build_manifest",
"def manifest_file(self):\n return self._manifest_file",
"def fetch_manifest(self):\n manifest = self.open(self.urls['manifest'])\n return manifest.read()",
"def get_sample_manifest(request, pk):\n sample = Sample.objects.get(pk=pk)\n mygrp = sample.library.group\n if not mygrp.is_public:\n try:\n membership_queryset = request.user.organization_set.filter(pk=mygrp.organization.pk)\n authorized = membership_queryset.exists()\n except AttributeError: # occurs if user is not logged in\n authorized = False\n if not authorized:\n raise PermissionDenied(_('Insufficient permissions to get group manifest.'))\n blob = SampleSerializer(sample).data\n blob['analysis_results'] = []\n for ar in sample.analysis_result_set.all():\n ar_blob = SampleAnalysisResultSerializer(ar).data\n del ar_blob['sample_obj']\n ar_blob['fields'] = []\n for field in ar.fields.all():\n field_blob = SampleAnalysisResultFieldSerializer(field).data\n del field_blob['analysis_result_obj']\n ar_blob['fields'].append(field_blob)\n blob['analysis_results'].append(ar_blob)\n\n return Response(blob)",
"def manifest_dict(self):\n return self._parsed",
"def manifest_dict(self):\n return self._parsed",
"def plugin_manifest():\n\n # XXX: note, this doesn't get called. For an example of this working,\n # see the mockplugin unit test.\n\n filepath = importlib.resources.files(__package__) / \"plugin_manifest.json\"\n return manifest.manifest_from_string(\n filepath.read_text()\n )",
"def manifest():\n return setup((), _manifest=1)",
"def get_manifestfile(self):\r\n if self.__manifest_file is None:\r\n return os.path.join(self.cloudletdir, \"manifest\")\r\n return self.__manifest_file",
"def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])",
"def manifest_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"manifest_type\")",
"def read_manifest(self): # -> None:\n ...",
"def get_manifest():\n n = request.args.get( # TODO make configurable at app level\n 'n', type=int, default=10\n )\n if n > 10:\n abort(400, 'n too large, max is 10') # TODO get the max config value here too\n manifest_lines = io.StringIO()\n with jsonlines.Writer(manifest_lines, sort_keys=True) as writer:\n writer.write_all(\n chain.get_manifest_json()\n for chain in manager.get_most_recent_chains_for_authorities(n)\n )\n\n return (\n manifest_lines.getvalue(),\n {\n 'Content-Type': 'application/json',\n 'Content-Disposition': 'inline; filename=\"manifest.jsonl\"',\n },\n )",
"def build_manifest(self, root):\n manifest = ET.SubElement(root, \"manifest\")\n for sid, href, media_type in self.manifest:\n args = {\"id\": sid, \"href\": href, \"media-type\": media_type}\n ET.SubElement(manifest, \"item\", **args) # pylint: disable-msg=W0142",
"def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)",
"def get_manifest_raw(args):\n # If we're given a path to a manifest file, use it\n if os.path.exists(args.manifest_location):\n manifest_fn = args.manifest_location\n log(\"INFO\", \"Using manifest file at location: {}\".format(manifest_fn))\n with open(manifest_fn, 'r') as manifest_file:\n manifest_raw = manifest_file.read()\n # Otherwise, use the CMake Magic manifest\n else:\n manifest_raw = _MANIFEST_CONTENTS\n log(\"TRACE\", \"Raw manifest contents: {}\".format(manifest_raw))\n return manifest_raw",
"def manifest_type(self) -> Optional[pulumi.Input[Union[str, 'GenerationManifestType']]]:\n return pulumi.get(self, \"manifest_type\")",
"def fact():\n manifests = [x for x in os.walk(manifests_dir)]\n\n return { 'manifests': manifests }",
"def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest",
"def get_manifest_info(manifest_xml):\n\n manifest_info = mf_parse.Manifest(manifest_xml, is_bytes=True)\n manifest_data = manifest_info.parse_data()\n\n return mf_info.ManifestInfo(manifest_data)",
"def manifestContent( self, pars, directory ):\n\n return None",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}",
"def get_destiny_manifest(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Manifest/\"))",
"def gen_manifest(stage_dir):\n manifest = {'files': []}\n\n for root, dirs, files in os.walk(stage_dir):\n for file_ in files:\n fullpath = os.path.join(root, file_)\n contents = open(fullpath, 'rb').read()\n sha1 = hashlib.sha1(contents).hexdigest()\n filename = os.path.relpath(fullpath, stage_dir)\n mode = get_permission(fullpath)\n manifest['files'].append({'path': filename, 'sha1': sha1,\n 'mode': mode})\n return manifest",
"def asset_manifest_path(self) -> str:\n return self._values.get(\"asset_manifest_path\")"
]
| [
"0.7615293",
"0.75619924",
"0.7228468",
"0.69406515",
"0.67661595",
"0.6721726",
"0.67200625",
"0.6517161",
"0.65150267",
"0.65150267",
"0.6418849",
"0.64011556",
"0.632388",
"0.6211856",
"0.61604166",
"0.61489534",
"0.60881954",
"0.6037556",
"0.59305507",
"0.58315796",
"0.5819782",
"0.57660747",
"0.5725925",
"0.57174593",
"0.5714475",
"0.57006484",
"0.56387335",
"0.55725735",
"0.5554765",
"0.55297506"
]
| 0.814788 | 0 |
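A minimal, self-contained sketch of how the get_manifest document in the record above could be exercised; FakeKnex and the Sample wrapper here are hypothetical stand-ins for the real client classes, which are not shown in this record.

class FakeKnex:
    # Hypothetical stand-in for the HTTP client that get_manifest assumes.
    def get(self, url):
        return {"requested": url}

class Sample:
    def __init__(self, knex, uuid):
        self.knex = knex
        self.uuid = uuid

    def get_manifest(self):
        url = f'samples/{self.uuid}/manifest'
        return self.knex.get(url)

print(Sample(FakeKnex(), "abc123").get_manifest())  # {'requested': 'samples/abc123/manifest'}
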
Returns the canvas size in pixels. | def getCanvasSize():
    # Assumes a module-level Tkinter canvas created elsewhere in the program.
    return canvas.winfo_width(), canvas.winfo_height() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSize(self):\n return self.screen.get_size()",
"def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)",
"def get_pixel_size(self):\n raise NotImplementedError",
"def pix_size(self):\n return self._pix_size",
"def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()",
"def numPixels(self):\n\t\treturn self.size",
"def numPixels(self):\n\t\treturn self.size",
"def px_size(self):\n xp, yp = ct.c_float(), ct.c_float()\n\n self.lib.GetPixelSize(ct.pointer(xp), ct.pointer(yp))\n\n return (xp.value, yp.value)",
"def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()",
"def get_size(self):\n return self._surf.get_size()",
"def size(self):\n\n\t\treturn self._window.size",
"def getSize(self):\n return self.__width * self.__height;",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n return self.width",
"def size(self):\n\n return self.width",
"def get_display_px(self):\n return self.image.size",
"def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)",
"def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)",
"def brush_size(self) -> int:\n # get the brush size context and return its value\n with self._brush_size.get_lock():\n return self._brush_size.value",
"def get_video_window_size(self):\n alloc = self.drawingarea.get_allocation()\n return (alloc.width, alloc.height)",
"def size(self):\n return self.__image.size",
"def get_size(self):\n result_str = subprocess.check_output([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'wm', 'size'\n ]).decode(DEFAULT_CHARSET)\n width, height = result_str.replace('\\n', '').replace('\\r', '').split(' ')[-1].split('x')\n return width, height",
"def _get_render_area_size(self):\n render_area = self._get_render_area()\n return (render_area.width(), render_area.height())",
"def get_display_size(screen_id: int = 0) -> Tuple[int, int]:\n display = pyglet.canvas.Display()\n screen = display.get_screens()[screen_id]\n return screen.width, screen.height",
"def DoGetClientSize(self):\r\n \r\n return self._rect.width, self._rect.height"
]
| [
"0.76065636",
"0.7291654",
"0.72000766",
"0.71854603",
"0.71594596",
"0.7155651",
"0.7155651",
"0.7099896",
"0.70850754",
"0.70767885",
"0.7060285",
"0.7034011",
"0.7029985",
"0.7029985",
"0.7029985",
"0.7029985",
"0.7029985",
"0.7029985",
"0.7029985",
"0.69946253",
"0.69839257",
"0.6979186",
"0.69257754",
"0.6924046",
"0.6915013",
"0.68764085",
"0.6873863",
"0.6867463",
"0.6849304",
"0.6831332"
]
| 0.8923893 | 0 |
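A runnable sketch of the module-level Tkinter canvas that getCanvasSize in the record above relies on; the 400x300 dimensions are arbitrary, it needs a display to run, and root.update() is what makes winfo_width()/winfo_height() report the realized size rather than 1.

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=400, height=300)
canvas.pack()
root.update()  # realize the widget so winfo_* returns actual pixel sizes

def getCanvasSize():
    return canvas.winfo_width(), canvas.winfo_height()

print(getCanvasSize())  # close to (400, 300); default borders may add a few pixels
root.destroy()
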
Saves a Python object to a path (in the filesystem). | def save_object(path, obj):
    # Pickle with the highest available protocol; "obj" avoids shadowing the builtin "object".
    with open(path, "wb") as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)",
"def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)",
"def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)",
"def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)",
"def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def save(self, path):\n pass",
"def save(self, path):\n pass",
"def save(self, path):\n pass",
"def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)",
"def save_pickle(obj, path):\n may_make_dir(osp.dirname(path))\n with open(path, 'w') as f:\n pickle.dump(obj, f)",
"def save_pickle(obj, path):\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)",
"def save_object(self, name: str, object):\n file_path = self.__get_file_path(name)\n self.__serialize_object(file_path, object)",
"def save(self, path: str):\n pass",
"def pickle_write(file_path, obj):\n\n with open(file_path, 'wb') as file:\n pickle.dump(obj, file)",
"def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)",
"def write_pickle(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)",
"def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)",
"def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)",
"def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)",
"def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)",
"def saveIntoFile(obj, path=\".\", file_name=\"saved_object.pickle\",\n folder_name=None):\n\n path_with_name = \"%s/%s\" % (path, file_name)\n if folder_name:\n os.makedirs(path_with_name + folder_name)\n path_with_name = \"%s/%s/%s\" % (path, folder_name, file_name)\n try:\n with open(path_with_name, \"wb\") as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n print(\"Object has been saved into %s/%s\" % (path, file_name))\n except IOError:\n raise InvalidFilesPath(\"Path: %s\\tfile name: %s\" % (path, file_name))",
"def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)",
"def pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))",
"def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)",
"def save(path_to_model):\n pass",
"def save():",
"def save(self, obj, filename):\n if not self.enabled:\n return\n\n # get unique filepath and filename\n index = 0\n while True:\n filepath = join(self.path, filename+\"_\"+str(index))\n if os.path.isfile(filepath):\n index = index + 1\n continue\n break\n\n # save object\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n try:\n pickle.dump(obj, f)\n except Exception as e:\n log.exception(e)\n log.warning(f\"save failed for {filename} {type(obj)}\")"
]
| [
"0.8127162",
"0.80762535",
"0.79843014",
"0.7827321",
"0.7706124",
"0.76796144",
"0.76796144",
"0.76796144",
"0.7668016",
"0.7657294",
"0.7656872",
"0.7633103",
"0.75708205",
"0.74377614",
"0.74298906",
"0.7400824",
"0.73958284",
"0.7359934",
"0.73415333",
"0.73415333",
"0.7337564",
"0.7330375",
"0.7321111",
"0.731281",
"0.7305965",
"0.72928286",
"0.72893995",
"0.7272517",
"0.72367877",
"0.72234774"
]
| 0.8243574 | 0 |
Loads a Python object from a path (in the filesystem). | def load_object(path):
    # Unpickle and return; "obj" avoids shadowing the builtin "object".
    with open(path, "rb") as f:
        obj = pickle.load(f)
    return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def load_object(path):\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def load_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError, \"Error loading object '%s': not a full path\" % path\n\n module, name = path[:dot], path[dot+1:]\n try:\n mod = __import__(module, {}, {}, [''])\n except ImportError, e:\n raise ImportError, \"Error loading object '%s': %s\" % (path, e)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError, \"Module '%s' doesn't define any object named '%s'\" % (module, name)\n\n return obj",
"def load(path):\n pass",
"def load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)",
"def load(self, path):\n pass",
"def load(self, path):\n pass",
"def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)",
"def import_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def load(self, path: str):\n pass",
"def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)",
"def load(path):\n \n with codecs.open(path, 'r', **rparams) as f:\n print ' > loading... {}'.format(path)\n if '.json' in path:\n obj = json.load(f, object_hook=json_numpy_obj_hook)\n elif '.pkl' in path:\n obj = pickle.load(file=f)\n else:\n # check the file referenced is sensible\n obj_id = [k for k in flocs.keys() if k in path]\n if obj_id is None or len(obj_id) != 1: raise ValueError(\n '{} not found in the path: \\n {}'.format(flocs.keys(), path))\n return obj",
"def get_object(path='', obj=None):\n if not path:\n return obj\n path = path.split('.')\n if obj is None:\n obj = importlib.import_module(path[0])\n path = path[1:]\n for item in path:\n if isinstance(obj, types.ModuleType):\n submodule = '{}.{}'.format(_package(obj), item)\n try:\n obj = importlib.import_module(submodule)\n except Exception as import_error:\n try:\n obj = getattr(obj, item)\n except:\n # FIXME: I know I should probably merge the errors, but\n # it's easier just to throw the import error since\n # it's most probably the one user wants to see.\n # Create a new LoadingError and throw a combination\n # of the import error and attribute error.\n raise import_error\n else:\n obj = getattr(obj, item)\n return obj",
"def load_object(filepath):\n with open(filepath, 'rb') as f:\n obj = pickle.load(f)\n return obj",
"def load(path):\n return ActWrapper.load(path)",
"def load(self, path):\n load_model(path, self)",
"def from_path(cls, path: str) -> Any:",
"def load(path_to_model):\n pass",
"def load_model(self, path):\n pass",
"def load_object(imp_path):\n module_name, obj_name = imp_path.split(\".\", 1)\n module = __import__(module_name)\n obj = attrgetter(obj_name)(module)\n\n return obj",
"def loadFromFile(self, path):\n\n if \"~\" in path:\n path = os.path.expanduser(path)\n f = open(path)\n body = f.read()\n f.close()\n self._path = path\n self.loadFromString(body)",
"def pkl_load(name, path = 'obj'):\n if '.p' not in name:\n name = name + '.pkl'\n path = os.path.join(path, name)\n try:\n obj = pickle.load(open(path, 'rb'))\n except FileNotFoundError:\n obj = None\n return obj",
"def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"def import_python_obj(path: str) -> RAW_CFG:\n mod_name, obj_name = path.rsplit('.', 1)\n try:\n mod = import_module(mod_name)\n obj = getattr(mod, obj_name)\n except (ImportError, ValueError, ModuleNotFoundError, AttributeError):\n raise ConfigException(f\"Could not import python object: {path}\")\n return cast(RAW_CFG, obj)",
"def import_from_path(path_to_module, obj_name = None):\n module_name = path_to_module.replace(\"/\",\".\").strip(\".py\")\n module = import_module(module_name)\n if obj_name == None:\n return module\n obj = getattr(module, obj_name)\n return obj",
"def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)",
"def load(path):\n _, ext = os.path.splitext(path)\n\n if ext == '.json':\n return JSONRFile(path)\n elif ext == '.root':\n # NOTE: import is here to make dependency on uproot runtime optional\n # pylint: disable=import-outside-toplevel\n from .root_file import ROOTFile\n return ROOTFile(path)\n\n raise ValueError(\"Umknown file extension '%s'\" % (path, ))",
"def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r"
]
| [
"0.821035",
"0.8207554",
"0.8137635",
"0.8132656",
"0.8068644",
"0.8003069",
"0.8003069",
"0.7993919",
"0.78682834",
"0.78477365",
"0.7648063",
"0.7589315",
"0.7433589",
"0.74033934",
"0.7324059",
"0.72719944",
"0.7229998",
"0.7194066",
"0.7148879",
"0.71485585",
"0.70853275",
"0.7076638",
"0.706214",
"0.706214",
"0.6975821",
"0.697513",
"0.69625604",
"0.6959323",
"0.69127685",
"0.6896953"
]
| 0.829935 | 0 |
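An illustrative round trip combining the save_object/load_object pair from the two records above; the temp-file path exists only for this demo.

import os
import pickle
import tempfile

def save_object(path, obj):
    with open(path, "wb") as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

def load_object(path):
    with open(path, "rb") as f:
        return pickle.load(f)

path = os.path.join(tempfile.mkdtemp(), "data.pkl")
save_object(path, {"answer": 42})
assert load_object(path) == {"answer": 42}
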
Returns a list of values filled with random numbers. The number of elements changes each time the function is called. | def get_value_list():
    # Both the list length and its values come from some_random_number(), assumed defined elsewhere.
    return [some_random_number() for _ in range(some_random_number())] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_numbers():\n\n return random.sample(range(100), 10)",
"def individual(min_val, max_val):\n value_list = [i for i in range(min_val, max_val+1)] #generate a list of 1 to 10\n random.shuffle(value_list) #shuffle the list\n return value_list",
"def newList(self):\n lst = []\n count = 0\n while count < 52:\n lst.append(randint(1, 1500))\n count += 1\n return lst",
"def list_gen(value):\n\n sample_list = random.sample(xrange(1, (value + 1)), value)\n return sample_list",
"def get_me_random_list(n):\n a_list = list(range(n))\n random.shuffle(a_list)\n return(a_list)",
"def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst",
"def generateRandomList(minval, maxval, size):\n return [random.randint(minval, maxval) for _ in range(size)]",
"def generate_testdata(N: int, min_value: int, max_value: int) -> list:\r\n numbers = set([])\r\n while len(numbers) < N:\r\n random = randint(min_value, max_value)\r\n numbers.add(random)\r\n return list(numbers)",
"def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list",
"def generate_list(length: int) -> list:\n\n return [randint(0, length + 1) for _ in range(length)]",
"def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]",
"def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l",
"def random_values():\n while True:\n yield random()",
"def generate_list(size):\n items = [randint(0, MAX_NUM) for i in range(size)]\n return items",
"def loto() -> List[int]:\n numeros = []\n nbre_valeurs = 6\n val_min = 1\n val_max = 49\n\n nbre_elements = 0\n while nbre_elements <= nbre_valeurs:\n numero = random.randint(val_min, val_max)\n if numero not in numeros:\n numeros.append(numero)\n nbre_elements += 1\n\n return numeros",
"def individual(length, min, max):\r\n return [ randint(min, max) for x in range(length) ]",
"def data(i):\n m = i*100\n return [random.randint(0, m) for j in range(i)]",
"def rnd_pset(self):\n\t\treturn [rnd() * 10, rnd() * 10, rnd() * 12 * 15, rnd() * 12 * 15]",
"def populate_empty_list():\n\n from random import randint, seed\n seed(56)\n l = []\n for i in range(100):\n l.append(randint(0, 100))\n print(l[34:56])",
"def generate(self):\n self.lst=[]\n for i in range(8):\n self.mat= random.sample(range(100),8)\n self.lst.append(self.mat)\n return self.lst",
"def list_random_sample_numbers(min: int, max: int, length: int) -> List:\r\n result = random.sample(range(min, max), length)\r\n return result",
"def rand_list(n, limit):\n g = []\n while n > 0:\n g.append(random.randrange(limit))\n n -= 1\n return g",
"def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]",
"def generate_digits(generator, size) :\n return [int(generator.random()*10) for i in range(size)]",
"def generate_random_list(self, n):\n return [self.generate_random_payload((int, str, float, bool, list, dict)) for i in range(n)]",
"def get_random_population():\r\n return [ get_random_individual() for _ in range(POPULATION_COUNT) ]",
"def random_num():\n my_list = [i for i in range(10)]\n num_list = random.sample(my_list, 4)\n while num_list[0] == 0:\n num_list = random.sample(my_list, 4)\n \n return num_list",
"def getValues(self):\n return [self._rng.normal(25,1)]",
"def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]",
"def shuffle(self) -> List[int]:"
]
| [
"0.750624",
"0.7490539",
"0.7296255",
"0.7264398",
"0.72286063",
"0.721338",
"0.717384",
"0.71707714",
"0.7136113",
"0.71132106",
"0.7103014",
"0.7091646",
"0.70703304",
"0.7049899",
"0.703982",
"0.6956948",
"0.694967",
"0.6850403",
"0.68373185",
"0.6828457",
"0.68178284",
"0.68033445",
"0.6784065",
"0.67434686",
"0.67350465",
"0.6718174",
"0.66956735",
"0.6672832",
"0.6658471",
"0.6599049"
]
| 0.80803955 | 0 |
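A runnable version of get_value_list under one possible assumption for the undefined helper; the real some_random_number is not shown in this record, so random.randint(1, 10) is purely illustrative.

import random

def some_random_number():
    # Hypothetical stand-in; the real helper is not shown in this record.
    return random.randint(1, 10)

def get_value_list():
    return [some_random_number() for _ in range(some_random_number())]

print(get_value_list())  # both the length and the values vary between calls
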
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the default algorithm. | def test_encryption_cycle_default_algorithm_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def testCryptMessageRoundtrip(self):\n try:\n cu = CryptUtils()\n ky = cu.newKey()\n msg = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\\n\"\n encMsg = cu.encryptMessage(msg, ky)\n dcrMsg = cu.decryptMessage(encMsg, ky)\n self.assertEqual(msg, dcrMsg)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)",
"def test_decrypt_format(self):\n with pytest.raises(EncryptionError):\n decrypt('message')",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context",
"def test_decrypt_encrypted(self):\n encrypted = encrypt('message')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'message'",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_distributed_paillier_encrypt_decrypt_sequence(\n distributed_schemes: Tuple[DistributedPaillier, ...],\n) -> None:\n plaintexts = [1, 2, 3, -1, -2, -3, 1.5, 42.42424242, -1.5, -42.42424242]\n ciphertexts = []\n for plaintext in plaintexts:\n ciphertexts.append(distributed_schemes[0].encrypt(plaintext))\n\n decryptions = await asyncio.gather(\n *[\n distributed_schemes[i].decrypt_sequence(ciphertexts)\n for i in range(len(distributed_schemes))\n ]\n )\n\n for decryption_list in decryptions:\n for idx, decryption in enumerate(decryption_list):\n assert plaintexts[idx] == decryption",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def test(cls):\n # https://tools.ietf.org/html/draft-kisa-hight-00#section-5\n old_num_rounds = cls.num_rounds\n cls.set_num_rounds(34)\n\n plaintext = (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)\n key = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,\n 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)\n assert cls(plaintext, key) == (0x00, 0xf4, 0x18, 0xae, 0xd9, 0x4f, 0x03, 0xf2)\n\n plaintext = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)\n key = (0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00)\n assert cls(plaintext, key) == (0x23, 0xce, 0x9f, 0x72, 0xe5, 0x43, 0xe6, 0xd8)\n\n cls.set_num_rounds(old_num_rounds)",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'",
"def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)",
"async def test_distributed_paillier_encrypt_decrypt(\n distributed_schemes: Tuple[DistributedPaillier, ...],\n plaintext: Union[float, int],\n) -> None:\n enc = distributed_schemes[0].encrypt(plaintext)\n dec = await asyncio.gather(\n *[distributed_schemes[i].decrypt(enc) for i in range(len(distributed_schemes))]\n )\n assert all(d == plaintext for d in dec)",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)",
"def test_encryption_cycle_default_algorithm_framed_stream_many_lines(self):\n ciphertext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"] * 10),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=128,\n ) as encryptor:\n for chunk in encryptor:\n ciphertext += chunk\n header_1 = encryptor.header\n plaintext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n for chunk in decryptor:\n plaintext += chunk\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"] * 10\n assert header_1.encryption_context == header_2.encryption_context",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_fail():\n enig = Enigma()\n str1 = \"Hellow\"\n str2 = \"Potato\"\n en1 = enig.encode(str1)\n en2 = enig.encode(str2)\n de1 = enig.decode(en1)\n de2 = enig.decode(en2)\n\n assert_not_equal(str1, de1)\n assert_not_equal(str2, de2)",
"def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'",
"def test_decoder():\r\n #Check edge cases first\r\n assert decode_morse(123) == \"Ciphertext is not a string!\", \"Test 1 failed, input integer 123\"\r\n assert decode_morse(\"\") == \"\", \"Test 2 failed, input ''\"\r\n assert decode_morse(\"string\") == \"ERROR: I can't decode the following character: string \\nYour decoded message thus far is: \", \"Test 3 failed, input 'string'\"\r\n assert decode_morse(\".- ..- / .- . .--.-.-.-.-.-.-.-.-.-.\") == \"ERROR: I can't decode the following character: .--.-.-.-.-.-.-.-.-.-. \\nYour decoded message thus far is: AU AE\", \"Test 4 failed, input '.- ..- / .- . .--.-.-.-.-.-.-.-.-.-.'\" \r\n assert decode_morse(\"/\") == \"\", \"Test 5 failed, input '/'\" #My function parses the slash as ciphertext, but whitespace_sorter discards it as meaningless noise.\r\n #This is fair because both encoder and decoder ignore spaces presented by themselves, in plaintext and ciphertext respectively.\r\n \r\n #Now we run possible ciphertexts and check their corresponding plaintexts:\r\n assert decode_morse(\"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.\") == \"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG\", \"Test 6 failed, input '- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.'\"\r\n assert decode_morse(\".... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\") == \"H1ER0PH@NT + '\", \"Test 7 failed, input '.... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\"\r\n assert decode_morse(\".-..-. .----.\") == '\"' + \"'\", \"Test 8 failed, input '.-..-. .----.'\"\r\n \r\n #Check that input not mutated\r\n test_ciphertext_9 = \"- . ... -\"\r\n encode_morse(test_ciphertext_9)\r\n assert test_ciphertext_9 == \"- . ... -\", \"Test 9 failed, input '- . ... -' mutated\" \r\n \r\n #If all tests passed\r\n print (\"Congratulations! 9/9 tests passed!\")",
"def test():\n ip = init = '1.2.3.4'\n key = '\\xff'*16 \n iterations = 10\n for i in xrange(iterations):\n ip = encrypt(key, ip)\n if ip != '191.207.11.210':\n raise ValueError\n for i in xrange(iterations):\n ip = decrypt(key, ip)\n if ip != init:\n raise ValueError"
]
| [
"0.72198886",
"0.70477796",
"0.703508",
"0.692355",
"0.68278927",
"0.6626544",
"0.6580773",
"0.65805084",
"0.6580006",
"0.6514737",
"0.6499495",
"0.64881426",
"0.64735883",
"0.6394814",
"0.6348888",
"0.6316184",
"0.62909734",
"0.6271909",
"0.6263519",
"0.6253984",
"0.625003",
"0.6242077",
"0.62313896",
"0.6226996",
"0.6195239",
"0.6182252",
"0.6180133",
"0.61726797",
"0.61561173",
"0.6154061"
]
| 0.7250217 | 0 |
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the default algorithm and no encryption context. | def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"], key_provider=self.kms_master_key_provider, frame_length=0
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def testCryptMessageRoundtrip(self):\n try:\n cu = CryptUtils()\n ky = cu.newKey()\n msg = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\\n\"\n encMsg = cu.encryptMessage(msg, ky)\n dcrMsg = cu.decryptMessage(encMsg, ky)\n self.assertEqual(msg, dcrMsg)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)",
"def test_decrypt_format(self):\n with pytest.raises(EncryptionError):\n decrypt('message')",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def test_decrypt_encrypted(self):\n encrypted = encrypt('message')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'message'",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_distributed_paillier_encrypt_decrypt_sequence(\n distributed_schemes: Tuple[DistributedPaillier, ...],\n) -> None:\n plaintexts = [1, 2, 3, -1, -2, -3, 1.5, 42.42424242, -1.5, -42.42424242]\n ciphertexts = []\n for plaintext in plaintexts:\n ciphertexts.append(distributed_schemes[0].encrypt(plaintext))\n\n decryptions = await asyncio.gather(\n *[\n distributed_schemes[i].decrypt_sequence(ciphertexts)\n for i in range(len(distributed_schemes))\n ]\n )\n\n for decryption_list in decryptions:\n for idx, decryption in enumerate(decryption_list):\n assert plaintexts[idx] == decryption",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def test(cls):\n # https://tools.ietf.org/html/draft-kisa-hight-00#section-5\n old_num_rounds = cls.num_rounds\n cls.set_num_rounds(34)\n\n plaintext = (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)\n key = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,\n 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)\n assert cls(plaintext, key) == (0x00, 0xf4, 0x18, 0xae, 0xd9, 0x4f, 0x03, 0xf2)\n\n plaintext = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)\n key = (0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00)\n assert cls(plaintext, key) == (0x23, 0xce, 0x9f, 0x72, 0xe5, 0x43, 0xe6, 0xd8)\n\n cls.set_num_rounds(old_num_rounds)",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'",
"def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)",
"async def test_distributed_paillier_encrypt_decrypt(\n distributed_schemes: Tuple[DistributedPaillier, ...],\n plaintext: Union[float, int],\n) -> None:\n enc = distributed_schemes[0].encrypt(plaintext)\n dec = await asyncio.gather(\n *[distributed_schemes[i].decrypt(enc) for i in range(len(distributed_schemes))]\n )\n assert all(d == plaintext for d in dec)",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)",
"def test_encryption_cycle_default_algorithm_framed_stream_many_lines(self):\n ciphertext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"] * 10),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=128,\n ) as encryptor:\n for chunk in encryptor:\n ciphertext += chunk\n header_1 = encryptor.header\n plaintext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n for chunk in decryptor:\n plaintext += chunk\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"] * 10\n assert header_1.encryption_context == header_2.encryption_context",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_fail():\n enig = Enigma()\n str1 = \"Hellow\"\n str2 = \"Potato\"\n en1 = enig.encode(str1)\n en2 = enig.encode(str2)\n de1 = enig.decode(en1)\n de2 = enig.decode(en2)\n\n assert_not_equal(str1, de1)\n assert_not_equal(str2, de2)",
"def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'",
"def test_decoder():\r\n #Check edge cases first\r\n assert decode_morse(123) == \"Ciphertext is not a string!\", \"Test 1 failed, input integer 123\"\r\n assert decode_morse(\"\") == \"\", \"Test 2 failed, input ''\"\r\n assert decode_morse(\"string\") == \"ERROR: I can't decode the following character: string \\nYour decoded message thus far is: \", \"Test 3 failed, input 'string'\"\r\n assert decode_morse(\".- ..- / .- . .--.-.-.-.-.-.-.-.-.-.\") == \"ERROR: I can't decode the following character: .--.-.-.-.-.-.-.-.-.-. \\nYour decoded message thus far is: AU AE\", \"Test 4 failed, input '.- ..- / .- . .--.-.-.-.-.-.-.-.-.-.'\" \r\n assert decode_morse(\"/\") == \"\", \"Test 5 failed, input '/'\" #My function parses the slash as ciphertext, but whitespace_sorter discards it as meaningless noise.\r\n #This is fair because both encoder and decoder ignore spaces presented by themselves, in plaintext and ciphertext respectively.\r\n \r\n #Now we run possible ciphertexts and check their corresponding plaintexts:\r\n assert decode_morse(\"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.\") == \"THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG\", \"Test 6 failed, input '- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.'\"\r\n assert decode_morse(\".... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\") == \"H1ER0PH@NT + '\", \"Test 7 failed, input '.... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\"\r\n assert decode_morse(\".-..-. .----.\") == '\"' + \"'\", \"Test 8 failed, input '.-..-. .----.'\"\r\n \r\n #Check that input not mutated\r\n test_ciphertext_9 = \"- . ... -\"\r\n encode_morse(test_ciphertext_9)\r\n assert test_ciphertext_9 == \"- . ... -\", \"Test 9 failed, input '- . ... -' mutated\" \r\n \r\n #If all tests passed\r\n print (\"Congratulations! 9/9 tests passed!\")",
"def test():\n ip = init = '1.2.3.4'\n key = '\\xff'*16 \n iterations = 10\n for i in xrange(iterations):\n ip = encrypt(key, ip)\n if ip != '191.207.11.210':\n raise ValueError\n for i in xrange(iterations):\n ip = decrypt(key, ip)\n if ip != init:\n raise ValueError"
]
| [
"0.72509044",
"0.7048452",
"0.7035682",
"0.69241333",
"0.68283325",
"0.6627613",
"0.6581378",
"0.65806115",
"0.658028",
"0.65148526",
"0.6499561",
"0.6489213",
"0.6473501",
"0.63943124",
"0.6348847",
"0.63158214",
"0.6292437",
"0.62737685",
"0.6263489",
"0.6254395",
"0.625021",
"0.6241529",
"0.6231175",
"0.6227485",
"0.6194722",
"0.6182149",
"0.6180775",
"0.61726993",
"0.6156701",
"0.6155247"
]
| 0.7220198 | 1 |
Test that the encrypt/decrypt cycle completes successfully for a single-frame message using the default algorithm. | def test_encryption_cycle_default_algorithm_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_calculate_cipher_step():\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_main():\n\n encoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n decoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n retc = cb.mole.test.test_encoder(encoder, decoder=decoder)\n\n if retc == 0:\n print \"NO FAILURES / INCONCLUSIVE\"\n return retc",
"def testCryptMessageRoundtrip(self):\n try:\n cu = CryptUtils()\n ky = cu.newKey()\n msg = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\\n\"\n encMsg = cu.encryptMessage(msg, ky)\n dcrMsg = cu.decryptMessage(encMsg, ky)\n self.assertEqual(msg, dcrMsg)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_framed_stream_many_lines(self):\n ciphertext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"] * 10),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=128,\n ) as encryptor:\n for chunk in encryptor:\n ciphertext += chunk\n header_1 = encryptor.header\n plaintext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n for chunk in decryptor:\n plaintext += chunk\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"] * 10\n assert header_1.encryption_context == header_2.encryption_context",
"def test(cls):\n # https://tools.ietf.org/html/draft-kisa-hight-00#section-5\n old_num_rounds = cls.num_rounds\n cls.set_num_rounds(34)\n\n plaintext = (0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)\n key = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,\n 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)\n assert cls(plaintext, key) == (0x00, 0xf4, 0x18, 0xae, 0xd9, 0x4f, 0x03, 0xf2)\n\n plaintext = (0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)\n key = (0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00)\n assert cls(plaintext, key) == (0x23, 0xce, 0x9f, 0x72, 0xe5, 0x43, 0xe6, 0xd8)\n\n cls.set_num_rounds(old_num_rounds)",
"def test_encoder():\r\n #Check edge cases first\r\n assert encode_morse(123) == \"Plaintext is not a string!\", \"Test 1 failed, input integer 123\"\r\n assert encode_morse(\"\") == \"\", \"Test 2 failed, input ''\"\r\n assert encode_morse(\"^\") == \"ERROR: You can't encode the following character: ^\", \"Test 3 failed, input '^'\"\r\n assert encode_morse(\" e e \") == \". / .\", \"Test 4 failed, input ' e e '\"\r\n assert encode_morse(\"AbCd\") == \".- -... -.-. -..\", \"Test 5 failed, input 'AbCd'\"\r\n \r\n #Now we run possible plaintexts and check their corresponding ciphertexts\r\n assert encode_morse(\"the quick brown fox jumps over the lazy dog\") == \"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.\", \"Test 6 failed, input 'the quick brown fox jumps over the lazy dog'\"\r\n assert encode_morse(\"H1er0ph@nT + '\") == \".... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\", \"Test 7 failed, input 'H1er0ph@nT + ''\"\r\n assert encode_morse('\"' + \"'\") == \".-..-. .----.\", \"Test 8 failed, input ''(double apostrophe)' + '(single apostrophe)'\"\r\n \r\n #Check that input not mutated\r\n test_plaintext_9 = \"test\"\r\n encode_morse(test_plaintext_9)\r\n assert test_plaintext_9 == \"test\", \"Test 9 failed, input 'test' mutated\"\r\n \r\n #If all tests passed\r\n print (\"Congratulations! 9/9 tests passed!\")",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_enc_FOR_MOTHER_RUSSIA(self):\n # test machine\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['N','O','C'], reflector=reflectors.B)\n\n # set state\n e1.encrypt('MLD')\n \n # assert encryption output\n self.assertEqual(e1._buffer.decode(), 'DOR')\n\n\n e1 = core.Machine(plugboard=['ZU', 'HL', 'CQ', 'WM', 'OA', 'PY', 'EB', 'TR', 'DN', 'VI'], settings=['X','I','S'],rotors=[erotors.M3_IV,erotors.M3_V, erotors.M3_II], offsets=['R','O','D'], reflector=reflectors.B)\n\n\n # set state\n e1.encrypt('UMDPQ CUAQN LVVSP IARKC TTRJQ KCFPT OKRGO ZXALD RLPUH AUZSO SZFSU GWFNF DZCUG VEXUU LQYXO TCYRP SYGGZ HQMAG PZDKC KGOJM MYYDD H')\n\n print(e1._buffer.decode())\n\n self.assertEqual(e1._buffer.decode(), \"GROUP SOUTH COMMA NDFRO MGENP AULUS XSIXT HARMY ISENC IRCLE DXOPE RATIO NBLAU FAILE DXCOM MENCE RELIE FOPER ATION IMMED IATEL Y\")",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]"
]
| [
"0.71647835",
"0.6868584",
"0.67947006",
"0.67754734",
"0.66704136",
"0.6647159",
"0.6578349",
"0.6536304",
"0.6478507",
"0.6441947",
"0.64351726",
"0.64146996",
"0.63911974",
"0.6362077",
"0.63369554",
"0.63120055",
"0.62790716",
"0.62682337",
"0.62535655",
"0.62238795",
"0.62044907",
"0.62008554",
"0.61632913",
"0.61446446",
"0.607419",
"0.6068036",
"0.6064775",
"0.6047777",
"0.60293293",
"0.60170066"
]
| 0.7332935 | 0 |
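A minimal sketch of the encrypt/decrypt round trip that the records above and below exercise, written against the legacy module-level API (aws-encryption-sdk for Python 1.x) that these test documents use. The KMS key ARN is a placeholder assumption, not a real key:

import aws_encryption_sdk

# Placeholder ARN -- substitute a KMS key you control.
key_provider = aws_encryption_sdk.KMSMasterKeyProvider(
    key_ids=["arn:aws:kms:us-west-2:111122223333:key/example"]
)

plaintext = b"my secret data" * 8
ciphertext, encrypt_header = aws_encryption_sdk.encrypt(
    source=plaintext,
    key_provider=key_provider,
    encryption_context={"purpose": "round-trip demo"},
    frame_length=1024,  # framed body with 1 KiB frames
)

decrypted, decrypt_header = aws_encryption_sdk.decrypt(
    source=ciphertext, key_provider=key_provider
)
assert decrypted == plaintext
# The encryption context travels in the message header and survives the cycle.
assert encrypt_header.encryption_context["purpose"] == "round-trip demo"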
Test that the encrypt/decrypt cycle completes successfully for a framed message with multiple frames using the default algorithm. | def test_encryption_cycle_default_algorithm_multiple_frames(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"] * 100,
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] * 100 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_framed_stream_many_lines(self):\n ciphertext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"] * 10),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=128,\n ) as encryptor:\n for chunk in encryptor:\n ciphertext += chunk\n header_1 = encryptor.header\n plaintext = b\"\"\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n for chunk in decryptor:\n plaintext += chunk\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"] * 10\n assert header_1.encryption_context == header_2.encryption_context",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def test_multi_line():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n test_case = [\n \"48 6B 10 49 02 01 00 01 02 03 FF\",\n \"48 6B 10 49 02 02 04 05 06 07 FF\",\n \"48 6B 10 49 02 03 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x49, 0x02] + list(range(12))\n\n # in-order\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)\n\n # test a few out-of-order cases\n for n in range(4):\n random.shuffle(test_case) # mix up the frame strings\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def testFramepack1(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n\n def bad():\n frame.cmd = 'SOME UNNOWN CMD'\n\n self.assertRaises(stomper.FrameError, bad)\n\n # Generate a MESSAGE frame:\n frame = stomper.Frame()\n frame.cmd = 'MESSAGE'\n frame.headers['destination'] = '/queue/a'\n frame.headers['message-id'] = 'card_data'\n frame.body = \"hello queue a\"\n result = frame.pack()\n\n# print \"\\n-- result \" + \"----\" * 10\n# pprint.pprint(result)\n# print\n\n # Try bad message unpack catching:\n bad_frame = stomper.Frame()\n self.assertRaises(stomper.FrameError, bad_frame.unpack, None)\n self.assertRaises(stomper.FrameError, bad_frame.unpack, '')\n\n # Try to read the generated frame back in\n # and then check the variables are set up\n # correctly:\n frame2 = stomper.Frame()\n frame2.unpack(result)\n\n self.assertEqual(frame2.cmd, 'MESSAGE')\n self.assertEqual(frame2.headers['destination'], '/queue/a')\n self.assertEqual(frame2.headers['message-id'], 'card_data')\n self.assertEqual(frame2.body, 'hello queue a')\n result = frame2.pack()\n\n correct = \"MESSAGE\\ndestination:/queue/a\\nmessage-id:card_data\\n\\nhello queue a\\x00\\n\"\n\n# print \"result: \"\n# pprint.pprint(result)\n# print\n# print \"correct: \"\n# pprint.pprint(correct)\n# print\n#\n self.assertEqual(result, correct)\n\n result = stomper.unpack_frame(result)\n\n self.assertEqual(result['cmd'], 'MESSAGE')\n self.assertEqual(result['headers']['destination'], '/queue/a')\n self.assertEqual(result['headers']['message-id'], 'card_data')\n self.assertEqual(result['body'], 'hello queue a')",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_calculate_cipher_step():\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number",
"def test_main():\n\n encoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n decoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n retc = cb.mole.test.test_encoder(encoder, decoder=decoder)\n\n if retc == 0:\n print \"NO FAILURES / INCONCLUSIVE\"\n return retc"
]
| [
"0.7067218",
"0.6682916",
"0.66145265",
"0.65793484",
"0.6564629",
"0.64674115",
"0.643743",
"0.64061713",
"0.6394333",
"0.63934284",
"0.63829815",
"0.6313884",
"0.63053197",
"0.630154",
"0.6298164",
"0.6282151",
"0.62698716",
"0.6220615",
"0.61705005",
"0.6101196",
"0.61000293",
"0.6045768",
"0.6029499",
"0.60040414",
"0.59999466",
"0.5964296",
"0.59197414",
"0.59069633",
"0.58960116",
"0.5851421"
]
| 0.7274183 | 0 |
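The frame_length argument in these records selects the message body format: 0 requests a single non-framed body, while a positive value splits the body into frames of that many bytes. A short sketch under the same assumptions as above (legacy 1.x API, placeholder key ARN):

import aws_encryption_sdk

key_provider = aws_encryption_sdk.KMSMasterKeyProvider(
    key_ids=["arn:aws:kms:us-west-2:111122223333:key/example"]
)
data = b"0123456789abcdef" * 256  # 4 KiB of plaintext

# frame_length=0 -> one non-framed body, as in the *_non_framed tests.
unframed, _ = aws_encryption_sdk.encrypt(
    source=data, key_provider=key_provider, frame_length=0
)

# frame_length=1024 -> framed body made of 1 KiB frames, as in the
# single-frame and multiple-frame tests.
framed, _ = aws_encryption_sdk.encrypt(
    source=data, key_provider=key_provider, frame_length=1024
)

# Either form decrypts back to the identical plaintext.
for ct in (unframed, framed):
    pt, _ = aws_encryption_sdk.decrypt(source=ct, key_provider=key_provider)
    assert pt == data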
Test that the encrypt/decrypt cycle completes successfully for a single-frame message using the aes_128_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_128_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise"
]
| [
"0.83253515",
"0.8318194",
"0.8020325",
"0.79975146",
"0.79973",
"0.79532135",
"0.78981006",
"0.7864936",
"0.7814415",
"0.78118306",
"0.77208996",
"0.7692941",
"0.7652597",
"0.7610188",
"0.7565554",
"0.7534197",
"0.74828565",
"0.66384315",
"0.6446058",
"0.61986744",
"0.61799836",
"0.61601263",
"0.58810294",
"0.5856334",
"0.5822344",
"0.5750499",
"0.5715059",
"0.56278867",
"0.5594933",
"0.5592579"
]
| 0.83224666 | 1 |
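Pinning a specific algorithm suite, as the aes_128_gcm_iv12_tag16 records do, is a matter of passing the suite to encrypt; the chosen suite is written into the message header, so the decrypt side needs no extra configuration. A sketch under the same assumptions as above (legacy 1.x API, placeholder key ARN; Algorithm is assumed importable from aws_encryption_sdk.identifiers):

import aws_encryption_sdk
from aws_encryption_sdk.identifiers import Algorithm

key_provider = aws_encryption_sdk.KMSMasterKeyProvider(
    key_ids=["arn:aws:kms:us-west-2:111122223333:key/example"]
)

ciphertext, header = aws_encryption_sdk.encrypt(
    source=b"example plaintext",
    key_provider=key_provider,
    algorithm=Algorithm.AES_128_GCM_IV12_TAG16,  # raw AES-GCM suite: no HKDF, no signature
    frame_length=1024,
)
# The negotiated suite is recorded in the message header.
assert header.algorithm == Algorithm.AES_128_GCM_IV12_TAG16

plaintext, _ = aws_encryption_sdk.decrypt(
    source=ciphertext, key_provider=key_provider
)
assert plaintext == b"example plaintext"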
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the aes_128_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
algorithm=Algorithm.AES_128_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))"
]
| [
"0.8125926",
"0.812481",
"0.78688955",
"0.78336215",
"0.78271556",
"0.7804262",
"0.77557755",
"0.77254546",
"0.7697716",
"0.7648585",
"0.75668395",
"0.7490993",
"0.740553",
"0.7383814",
"0.73778427",
"0.7327657",
"0.7217686",
"0.6693059",
"0.6647088",
"0.6601028",
"0.6460592",
"0.64424706",
"0.6425131",
"0.62911516",
"0.6188626",
"0.6126867",
"0.6069197",
"0.60319597",
"0.59522355",
"0.595144"
]
| 0.81351954 | 0 |
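The rows above and below all exercise the same round trip: encrypt under an AES-GCM suite with a 12-byte IV and 16-byte authentication tag, decrypt, and compare against the original plaintext. A minimal sketch of that primitive, assuming the `cryptography` package (the AWS Encryption SDK in these snippets wraps this with its own message format and key providers):

import os

from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=192)  # 192 -> AES_192_GCM suites; 256 for AES_256_GCM
iv = os.urandom(12)                        # IV12: 12-byte nonce
aad = b'{"purpose": "test"}'               # stand-in for the encryption context

ciphertext = AESGCM(key).encrypt(iv, b"example plaintext", aad)  # appends the 16-byte tag (TAG16)
assert AESGCM(key).decrypt(iv, ciphertext, aad) == b"example plaintext"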
Test that the encrypt/decrypt cycle completes successfully for a single frame message using the aes_192_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_192_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def calculate_message_authentication_code_cbc(\n key: bytes,\n additional_data: bytes,\n payload: bytes = b\"\",\n block_0: bytes = bytes(16),\n) -> bytes:\n blocks = (\n block_0 + len(additional_data).to_bytes(2, \"big\") + additional_data + payload\n )\n y_cipher = Cipher(algorithms.AES(key), modes.CBC(bytes(16)))\n y_encryptor = y_cipher.encryptor()\n y_blocks = (\n y_encryptor.update(byte_pad(blocks, block_size=16)) + y_encryptor.finalize()\n )\n # only calculate, no ctr encryption\n return y_blocks[-16:]",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3"
]
| [
"0.82340854",
"0.81368446",
"0.8037293",
"0.794402",
"0.7889821",
"0.78590316",
"0.772915",
"0.77018535",
"0.76803374",
"0.7665735",
"0.75949967",
"0.752944",
"0.7509843",
"0.74839497",
"0.74594146",
"0.7330118",
"0.73176104",
"0.6417498",
"0.62863433",
"0.6270313",
"0.59897995",
"0.59773177",
"0.5951844",
"0.5909234",
"0.577166",
"0.5766398",
"0.57650214",
"0.56954294",
"0.56692415",
"0.5649672"
]
| 0.8333003 | 0 |
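Because each row here differs only in the algorithm suite and the frame length, a parametrized version is a natural refactor. This is a hedged sketch, assuming the `VALUES` fixture and `kms_master_key_provider` attribute from the snippets above:

import pytest

import aws_encryption_sdk
from aws_encryption_sdk.identifiers import Algorithm

class TestEncryptionCycle:
    # kms_master_key_provider is assumed to be set up as in the original suite.
    @pytest.mark.parametrize("frame_length", [0, 1024])  # non-framed / single frame
    @pytest.mark.parametrize(
        "algorithm", [Algorithm.AES_192_GCM_IV12_TAG16, Algorithm.AES_256_GCM_IV12_TAG16]
    )
    def test_encryption_cycle(self, algorithm, frame_length):
        ciphertext, _ = aws_encryption_sdk.encrypt(
            source=VALUES["plaintext_128"],
            key_provider=self.kms_master_key_provider,
            encryption_context=VALUES["encryption_context"],
            frame_length=frame_length,
            algorithm=algorithm,
        )
        plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
        assert plaintext == VALUES["plaintext_128"]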
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the aes_192_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
algorithm=Algorithm.AES_192_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test():\n\n block_size = 16\n\n # Test case 1: incorrect value < required:\n paddedMsg = b'ICE ICE BABY\\x03\\x03\\x03\\x03'\n remove_padding(paddedMsg, block_size)\n\n # Test caes 2: incorrect value > required:\n paddedMsg = b\"ICE ICE BABY\\x05\\x05\\x05\\x05\" \n remove_padding(paddedMsg, block_size)\n\n # Test case 3: incorrect length:\n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 4: variable numbers:\n paddedMsg = b\"ICE ICE BABY\\x01\\x02\\x03\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 5: correct padding \n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))"
]
| [
"0.8052337",
"0.7957958",
"0.78567165",
"0.77485675",
"0.7733715",
"0.7628773",
"0.76000386",
"0.75640064",
"0.75172585",
"0.7472284",
"0.74037635",
"0.73793584",
"0.7221947",
"0.71908444",
"0.7173333",
"0.71495783",
"0.7008892",
"0.6746724",
"0.6503002",
"0.64997023",
"0.64591926",
"0.639052",
"0.6182967",
"0.60964346",
"0.6094317",
"0.6065784",
"0.6041725",
"0.60372615",
"0.5987627",
"0.59772795"
]
| 0.8143048 | 0 |
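The float columns attached to each row (one score per negative, plus the trailing document_score) are consistent with embedding-space similarities. A speculative sketch of how such scores could be computed with cosine similarity; the encoder itself is a placeholder assumption, not part of this dump:

import numpy as np

def cosine_scores(query_vec: np.ndarray, candidate_vecs: np.ndarray) -> np.ndarray:
    """Cosine similarity of one query vector against an (n, d) stack of candidates."""
    q = query_vec / np.linalg.norm(query_vec)
    c = candidate_vecs / np.linalg.norm(candidate_vecs, axis=1, keepdims=True)
    return c @ q

# e.g. negative_scores = cosine_scores(encode(query), encode_many(negatives)),
# where encode / encode_many stand in for whatever model produced the dump.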
Test that the encrypt/decrypt cycle completes successfully for a single frame message using the aes_256_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def fw_handshake_3_4_aes( self , packet ):\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header, and\n\t\t# if so assert the correctness of the FCS.\n\t\tradiotapFCSFlag = hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet )\n\t\t\tpacket.getlayer( EAPOL_WPAKey ).remove_payload() # Remove the FCS.\n\t\t\t\n\t\t# Assert on the flags in the Key Information to verify it is FWHS Message 3/4.\n\t\tkeyinfoReceived \t= packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\tself.replayCounter\t= packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\tflaglist\t\t= ['HMAC_SHA1_AES','idx0','pairwise','install','ack','mic']\n\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t'The received packet is not 4-Way Handshake Message 3/4.'\n\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL 4-Way Handshake Message 3/4 AES' )\n\t\t\t\n\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\tself.__assertWPAKeyMIC( packet , Crypto.Hash.SHA )"
]
| [
"0.82659876",
"0.8149171",
"0.7972375",
"0.7958142",
"0.7951207",
"0.7863249",
"0.78558993",
"0.7819351",
"0.77480054",
"0.7725433",
"0.770075",
"0.7648856",
"0.7616764",
"0.75784475",
"0.7509083",
"0.74368125",
"0.7422427",
"0.65046304",
"0.63733006",
"0.6131729",
"0.6083416",
"0.6053862",
"0.5839924",
"0.581152",
"0.57924217",
"0.5732026",
"0.5699042",
"0.56939757",
"0.5619044",
"0.5616101"
]
| 0.82665855 | 0 |
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the aes_256_gcm_iv12_tag16 algorithm. | def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))"
]
| [
"0.80557966",
"0.79471004",
"0.7786498",
"0.77632356",
"0.7734715",
"0.77075064",
"0.76572573",
"0.76069474",
"0.7582958",
"0.75700486",
"0.7485742",
"0.74492884",
"0.7329467",
"0.7287656",
"0.7265688",
"0.7225671",
"0.7110978",
"0.6608641",
"0.65697694",
"0.64846396",
"0.64070725",
"0.6405138",
"0.62642",
"0.61929274",
"0.6154104",
"0.6059849",
"0.60596365",
"0.60589814",
"0.6010847",
"0.5948092"
]
| 0.8068432 | 0 |
Test that the encrypt/decrypt cycle completes successfully for a single-frame message using the aes_192_gcm_iv12_tag16_hkdf_sha256 algorithm. | def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def fw_handshake_3_4_aes( self , packet ):\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header, and\n\t\t# if so assert the correctness of the FCS.\n\t\tradiotapFCSFlag = hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet )\n\t\t\tpacket.getlayer( EAPOL_WPAKey ).remove_payload() # Remove the FCS.\n\t\t\t\n\t\t# Assert on the flags in the Key Information to verify it is FWHS Message 3/4.\n\t\tkeyinfoReceived \t= packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\tself.replayCounter\t= packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\tflaglist\t\t= ['HMAC_SHA1_AES','idx0','pairwise','install','ack','mic']\n\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t'The received packet is not 4-Way Handshake Message 3/4.'\n\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL 4-Way Handshake Message 3/4 AES' )\n\t\t\t\n\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\tself.__assertWPAKeyMIC( packet , Crypto.Hash.SHA )",
"def verify_hmac(self, payload):\r\n \r\n new_hmac = hmac.new(bytes(self.passphrase), b'%s'%(payload['eiv']) , hashlib.sha224)\r\n new_hmac.update(b'%s'%(payload['enid']))\r\n new_hmac.update(b'%s'%(payload['ed']))\r\n new_hmac.update(self.sessionID)\r\n #print(new_hmac.digest())\r\n #print(b'%s'%(payload['hmac']))\r\n if b'%s'%(payload['hmac']) == new_hmac.digest():\r\n return \"Successful Decryption\"\r\n return \"Failed Authentication\"",
"def test_main_decrypt(self, mock_context, mock_create_aws, mock_gen):\n context = ef_password.EFPWContext()\n context.env, context.service, context.decrypt = self.env, self.service, base64.b64encode(self.secret)\n mock_context.return_value = context\n mock_create_aws.return_value = {\"kms\": self.mock_kms}\n ef_password.main()\n mock_gen.assert_not_called()\n self.mock_kms.encrypt.assert_not_called()\n self.mock_kms.decrypt.assert_called_once_with(CiphertextBlob=self.secret)"
]
| [
"0.8300203",
"0.82431793",
"0.8080313",
"0.80119574",
"0.7955447",
"0.7933083",
"0.79175746",
"0.7875227",
"0.78650355",
"0.7831236",
"0.77585673",
"0.7638734",
"0.7600599",
"0.7563252",
"0.75303715",
"0.7509076",
"0.7427312",
"0.6463067",
"0.63739026",
"0.62837136",
"0.60598445",
"0.6000886",
"0.59612375",
"0.5957181",
"0.5953939",
"0.5949688",
"0.59010136",
"0.58605975",
"0.5855562",
"0.56497604"
]
| 0.83834475 | 0 |
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the aes_192_gcm_iv12_tag16_hkdf_sha256 algorithm. | def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)"
]
| [
"0.8106477",
"0.8044833",
"0.7915947",
"0.7796911",
"0.7775928",
"0.77545696",
"0.77477324",
"0.7743791",
"0.774213",
"0.7659902",
"0.76441413",
"0.753039",
"0.7520855",
"0.7500556",
"0.74554074",
"0.7379598",
"0.73441225",
"0.6818662",
"0.65952843",
"0.65744424",
"0.6453497",
"0.6394543",
"0.62774724",
"0.6257614",
"0.61982",
"0.61508",
"0.6077202",
"0.6076829",
"0.60465246",
"0.60178477"
]
| 0.8183349 | 0 |
Test that the encrypt/decrypt cycle completes successfully for a single-frame message using the aes_256_gcm_iv12_tag16_hkdf_sha256 algorithm. | def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise",
"def verify_hmac(self, payload):\r\n \r\n new_hmac = hmac.new(bytes(self.passphrase), b'%s'%(payload['eiv']) , hashlib.sha224)\r\n new_hmac.update(b'%s'%(payload['enid']))\r\n new_hmac.update(b'%s'%(payload['ed']))\r\n new_hmac.update(self.sessionID)\r\n #print(new_hmac.digest())\r\n #print(b'%s'%(payload['hmac']))\r\n if b'%s'%(payload['hmac']) == new_hmac.digest():\r\n return \"Successful Decryption\"\r\n return \"Failed Authentication\"",
"def fw_handshake_3_4_aes( self , packet ):\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header, and\n\t\t# if so assert the correctness of the FCS.\n\t\tradiotapFCSFlag = hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet )\n\t\t\tpacket.getlayer( EAPOL_WPAKey ).remove_payload() # Remove the FCS.\n\t\t\t\n\t\t# Assert on the flags in the Key Information to verify it is FWHS Message 3/4.\n\t\tkeyinfoReceived \t= packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\tself.replayCounter\t= packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\tflaglist\t\t= ['HMAC_SHA1_AES','idx0','pairwise','install','ack','mic']\n\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t'The received packet is not 4-Way Handshake Message 3/4.'\n\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL 4-Way Handshake Message 3/4 AES' )\n\t\t\t\n\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\tself.__assertWPAKeyMIC( packet , Crypto.Hash.SHA )",
"def test_encryption_cycle_default_algorithm_framed_stream(self):\n with aws_encryption_sdk.stream(\n source=io.BytesIO(VALUES[\"plaintext_128\"]),\n key_provider=self.kms_master_key_provider,\n mode=\"e\",\n encryption_context=VALUES[\"encryption_context\"],\n ) as encryptor:\n ciphertext = encryptor.read()\n header_1 = encryptor.header\n with aws_encryption_sdk.stream(\n source=io.BytesIO(ciphertext), key_provider=self.kms_master_key_provider, mode=\"d\"\n ) as decryptor:\n plaintext = decryptor.read()\n header_2 = decryptor.header\n assert plaintext == VALUES[\"plaintext_128\"]\n assert header_1.encryption_context == header_2.encryption_context"
]
| [
"0.8349734",
"0.8254614",
"0.8019414",
"0.801054",
"0.7972647",
"0.7942672",
"0.7920008",
"0.7875361",
"0.78438544",
"0.77628016",
"0.77153695",
"0.7620499",
"0.76162994",
"0.758943",
"0.74901235",
"0.73929536",
"0.7372713",
"0.6539147",
"0.6385468",
"0.623899",
"0.60363567",
"0.60156465",
"0.59652746",
"0.59314924",
"0.59096533",
"0.58992285",
"0.5891892",
"0.5825685",
"0.5821607",
"0.57051504"
]
| 0.8321763 | 1 |
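
The test rows above and below all share one encrypt-then-decrypt shape, varying only the algorithm suite and the frame_length (0 yields a non-framed message, any positive value a framed one). As an illustration only, a consolidated round-trip helper — assuming the 1.x aws_encryption_sdk API used throughout this data, with VALUES and kms_master_key_provider standing in for the source suite's fixtures:

    import aws_encryption_sdk
    from aws_encryption_sdk.identifiers import Algorithm  # assumed import path for the enum used above

    def round_trip(plaintext, key_provider, algorithm=None, frame_length=0, encryption_context=None):
        """Encrypt then decrypt once, returning the recovered plaintext."""
        kwargs = dict(source=plaintext, key_provider=key_provider, frame_length=frame_length)
        if algorithm is not None:
            kwargs["algorithm"] = algorithm
        if encryption_context is not None:
            kwargs["encryption_context"] = encryption_context
        ciphertext, _header = aws_encryption_sdk.encrypt(**kwargs)
        recovered, _header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
        return recovered

    # e.g. the body of one of the tests above collapses to:
    # assert round_trip(VALUES["plaintext_128"], self.kms_master_key_provider,
    #                   Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256) == VALUES["plaintext_128"]
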
Test that the encrypt/decrypt cycle completes successfully for a non-framed message using the aes_256_gcm_iv12_tag16_hkdf_sha256 algorithm. | def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=0,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)",
"def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'",
"def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):"
]
| [
"0.8120955",
"0.80299413",
"0.7859964",
"0.77964133",
"0.77957535",
"0.7729641",
"0.7724217",
"0.7703868",
"0.76865077",
"0.76191044",
"0.7574905",
"0.754382",
"0.74360013",
"0.7435429",
"0.73939097",
"0.73673207",
"0.72987735",
"0.6695215",
"0.6554786",
"0.6514424",
"0.649278",
"0.6481008",
"0.6331122",
"0.6327569",
"0.62490946",
"0.61619127",
"0.614172",
"0.6100911",
"0.6092771",
"0.6083596"
]
| 0.81052995 | 1 |
Test that the encrypt/decrypt cycle completes successfully for a single-frame message using the aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384 algorithm. | def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):
ciphertext, _ = aws_encryption_sdk.encrypt(
source=VALUES["plaintext_128"],
key_provider=self.kms_master_key_provider,
encryption_context=VALUES["encryption_context"],
frame_length=1024,
algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
)
plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)
assert plaintext == VALUES["plaintext_128"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha384_ecdsa_p384_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_hkdf_sha256_ecdsa_p256_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_192_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_192_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_aes_128_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_128_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_single_frame(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_multiple_frames(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"] * 100,\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=1024,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"] * 100",
"async def test_noise_frame_helper_incorrect_key_fragments():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n in_pkt = bytes.fromhex(pkt)\n for i in range(len(in_pkt)):\n helper.data_received(in_pkt[i : i + 1])\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]",
"def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = MockAPINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n client_info=\"my client\",\n log_name=\"test\",\n )\n helper._transport = MagicMock()\n helper._writer = MagicMock()\n\n for pkt in outgoing_packets:\n helper.mock_write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake(30)",
"async def test_noise_frame_helper_incorrect_key():\n outgoing_packets = [\n \"010000\", # hello packet\n \"010031001ed7f7bb0b74085418258ed5928931bc36ade7cf06937fcff089044d4ab142643f1b2c9935bb77696f23d930836737a4\",\n ]\n incoming_packets = [\n \"01000d01736572766963657465737400\",\n \"0100160148616e647368616b65204d4143206661696c757265\",\n ]\n packets = []\n\n def _packet(type_: int, data: bytes):\n packets.append((type_, data))\n\n def _on_error(exc: Exception):\n raise exc\n\n helper = APINoiseFrameHelper(\n on_pkt=_packet,\n on_error=_on_error,\n noise_psk=\"QRTIErOb/fcE9Ukd/5qA3RGYMn0Y+p06U58SCtOXvPc=\",\n expected_name=\"servicetest\",\n )\n helper._transport = MagicMock()\n\n for pkt in outgoing_packets:\n helper._write_frame(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n for pkt in incoming_packets:\n helper.data_received(bytes.fromhex(pkt))\n\n with pytest.raises(InvalidEncryptionKeyAPIError):\n await helper.perform_handshake()",
"def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, wrong_cleartext))",
"def gk_handshake_1_2_aes( self , packet ):\n\t\ttry:\n\n\t\t\t# Decapsulate the TKIP packet, and rebuild the plaintext packet.\n\t\t\tplaintext\t= self.handleAES.decapsulate( packet , self.TK )\n\t\t\tpacket \t\t= LLC()/SNAP()/EAPOL()/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tnew_packet \t= packet.__class__( plaintext )\n\t\t\t\n\t\t\t# Assert on the flags in the Key Information to verify it is GKHS Message 1/2.\n\t\t\tkeyinfoReceived \t= new_packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\t\tself.__setKeyIDFromFlaglist( self.__getFlaglist( keyinfoReceived ) )\n\t\t\tflaglist\t\t= ['HMAC_SHA1_AES','group','ack','mic','secure']\n\t\t\tflaglist.append( self.keyID ) # Copying the Key ID from the received packet.\n\t\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t\t'The received packet is not Group Key Handshake Message 1/2.'\n\t\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL Group Key Handshake Message 1/2 AES' )\n\t\t\t\n\t\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\t\tself.__assertWPAKeyMIC( new_packet , Crypto.Hash.SHA )\n\n\t\t\t# Update the Replay Counter.\n\t\t\tself.replayCounter\t= new_packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\t\t\n\t\t\t# Retrieve the Group Temporal key.\n\t\t\tself.GTK = self.handleAES.unwrapKey( new_packet.WPAKey , self.KEK ) # Resulting key of 16/32 octets.\n\t\t\tself.logger.logKey( 'Group Temporal Key' , self.GTK )\n\t\t\t\n\t\texcept:\n\t\t\traise",
"def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):",
"def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3",
"def fw_handshake_3_4_aes( self , packet ):\n\t\t# Check if the Frame Check Sequence (FCS) flag is set in the Radiotap header, and\n\t\t# if so assert the correctness of the FCS.\n\t\tradiotapFCSFlag = hasFCS( packet )\n\t\tif radiotapFCSFlag is True:\n\t\t\tassertDot11FCS( packet )\n\t\t\tpacket.getlayer( EAPOL_WPAKey ).remove_payload() # Remove the FCS.\n\t\t\t\n\t\t# Assert on the flags in the Key Information to verify it is FWHS Message 3/4.\n\t\tkeyinfoReceived \t= packet.getlayer( EAPOL_WPAKey ).KeyInfo\n\t\tself.replayCounter\t= packet.getlayer( EAPOL_WPAKey ).ReplayCounter\n\t\tflaglist\t\t= ['HMAC_SHA1_AES','idx0','pairwise','install','ack','mic']\n\t\tkeyinfoCalculated \t= self.__getKeyInformation( flaglist )\n\t\tassert( keyinfoReceived == keyinfoCalculated ), \\\n\t\t\t'The received packet is not 4-Way Handshake Message 3/4.'\n\t\tself.logger.log( self.logger.RECEIVED , 'EAPOL 4-Way Handshake Message 3/4 AES' )\n\t\t\t\n\t\t# Assert that the EAPoL WPA Key layer has a valid MIC.\n\t\tself.__assertWPAKeyMIC( packet , Crypto.Hash.SHA )"
]
| [
"0.8205865",
"0.8129983",
"0.8120724",
"0.8086859",
"0.8070674",
"0.79456544",
"0.79447645",
"0.794005",
"0.7914017",
"0.7858736",
"0.78543276",
"0.7836834",
"0.780984",
"0.7741609",
"0.7599251",
"0.7597625",
"0.75322336",
"0.6708555",
"0.65327346",
"0.63315946",
"0.6212951",
"0.61953896",
"0.6084609",
"0.59902644",
"0.5989129",
"0.5776246",
"0.57611597",
"0.5745486",
"0.57397354",
"0.5717684"
]
| 0.8223634 | 0 |
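
Several negatives in these rows (e.g. test_encryption_cycle_default_algorithm_framed_stream) exercise the streaming interface rather than the one-shot encrypt/decrypt calls. A minimal sketch of that variant, again assuming the same 1.x aws_encryption_sdk API shown in the data:

    import io
    import aws_encryption_sdk

    def stream_round_trip(plaintext, key_provider, encryption_context=None):
        # mode="e" encrypts, mode="d" decrypts; both return file-like context managers.
        with aws_encryption_sdk.stream(
            source=io.BytesIO(plaintext),
            key_provider=key_provider,
            mode="e",
            encryption_context=encryption_context or {},
        ) as encryptor:
            ciphertext = encryptor.read()
        with aws_encryption_sdk.stream(
            source=io.BytesIO(ciphertext), key_provider=key_provider, mode="d"
        ) as decryptor:
            return decryptor.read()
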
Get key for metrics list | def get_metrics_key(self, project):
return "{0}-metrics".format(project) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:\n parts: List[Any] = list(label)\n metric = parts[-1]\n parts[-1] = metrics.index(metric)\n return tuple(parts)",
"def metric_identifier(self) -> str:\n return self._metric_identifier",
"def key(key):\n return key",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def getDiscoveredMetricsKeys(self):\n return tuple(self.__foundMetrcsKeySet)",
"def key(self):\n return key_for_name(self.name)",
"def get_key(self, metric, period):\n key = self.key_format\n key = key.replace('{metric}', metric)\n key = key.replace('{period}', period)\n return key",
"def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")",
"def output_metric(self, key=None, metric='loss'):\n if key is None:\n key = self.key\n return self.metrics[key][metric][-1]",
"def key(self):\n return self.key_for(self.id)",
"def key(self):\n return self.name",
"def key(self):\n\n for member in self.members:\n if member.key:\n return member.name",
"def get_key(self, item):\r\n return item[0]",
"def get_key(self):\n return self._determine_key()",
"def get_metric_list(self) -> List[str]:\n ...",
"def get_key(self):\n return self.key",
"def get_key(self):\n return self.key",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results",
"def list_key(self):\n # Disable this false positive\n # pylint: disable=comparison-with-callable\n if self == SimulatorControlType.DEVICE_TYPE:\n return \"devicetypes\"\n # pylint: enable=comparison-with-callable\n return self.value + \"s\""
]
| [
"0.64254856",
"0.6350569",
"0.63114125",
"0.6273934",
"0.6273934",
"0.6222081",
"0.6210373",
"0.61114717",
"0.60946375",
"0.60946375",
"0.6091866",
"0.60288227",
"0.60165614",
"0.6011354",
"0.59872615",
"0.5983219",
"0.5978787",
"0.5976253",
"0.5976253",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.59761053",
"0.5971945",
"0.59696823"
]
| 0.69156116 | 0 |
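
The next rows switch from crypto tests to a metrics key-naming scheme. Pulling together this row's document, the get_filters_names_key row that follows, and the get_filters_values_key negative shown below it, the implied key layout is roughly the following — an illustrative reconstruction that assumes plain str inputs and drops the source's to_unicode helper:

    class MetricKeys:
        """Key layout implied by the surrounding snippets (illustrative only)."""

        def get_metrics_key(self, project):
            # Where the set of metric names for a project lives.
            return "{0}-metrics".format(project)

        def get_filters_names_key(self, project, metric_name):
            # Where the filter names attached to one metric live.
            return "{0}-metrics-filters:{1}".format(project, metric_name)

        def get_filters_values_key(self, project, metric_name, f):
            # Where the observed values of one filter live.
            return "{0}-metrics-filter-values:{1}:{2}".format(project, metric_name, f)

    keys = MetricKeys()
    assert keys.get_filters_names_key("myproj", "latency") == "myproj-metrics-filters:latency"
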
Get key for metrics filter names | def get_filters_names_key(self, project, metric_name):
return u"{0}-metrics-filters:{1}".format(project, to_unicode(metric_name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_filters_values_key(self, project, metric_name, f):\n return u\"{0}-metrics-filter-values:{1}:{2}\".format(project,\n to_unicode(metric_name),\n to_unicode(f))",
"def get_filter_name(self):\n pass",
"def get_key(self, metric, period):\n key = self.key_format\n key = key.replace('{metric}', metric)\n key = key.replace('{period}', period)\n return key",
"def getFilterNameFromInt(cls, num):\n return cls.SUPPORTED_FILTERS[num]",
"def get_current_filters(self) -> str:\r\n return self.__filters_string",
"def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])",
"def names_and_ids(self):\n names = [filter_.DISPLAY_NAME for filter_ in self._filters.values()]\n \n return sorted(zip(names, self._filters.keys()))",
"def get_metrics_key(self, project):\n return \"{0}-metrics\".format(project)",
"def make_key(self, project, name, period, filters=None):\n\n parts = [project, name, period]\n\n if isinstance(filters, dict):\n filters_part = u\"/\".join(\n [u\"{0}|{1}\".format(f, to_unicode(self.clean_filter_value(filters[f])))\n for f in sorted(filters.keys(), key=lambda x: x) if f])\n\n if filters_part:\n parts.append(filters_part)\n\n return u';'.join(parts)",
"def _key(self):\n key_args = [self.__class__.__name__] + [str(a) for a in self.args]\n return (\":\".join(key_args))",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")",
"async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name",
"def key(self):\n return key_for_name(self.name)",
"def key(self, name):\n return name",
"def configuration_keys(self):\n return ['filter1', 'echangle', 'xdangle']",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def metric_name(self) -> str:\n return self._values.get('metric_name')",
"def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters",
"def get_metric_filter(\n log_group_name,\n filter_name_prefix,\n metric_name,\n metric_namespace,\n):\n paginator = CLIENT.get_paginator(\"describe_metric_filters\")\n response_iterator = paginator.paginate(\n logGroupName=log_group_name,\n filterNamePrefix=filter_name_prefix,\n )\n metric_filters_response = [\n metric_filter\n for response in response_iterator\n for metric_filter in response.get(\"metricFilters\", [])\n ]\n LOGGER.debug(\"metric filters response: %s\", metric_filters_response)\n if not metric_filters_response:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}]\"\n )\n # Get the fist metric filter with a matching transformation with the same\n # metricNameSpace and metricName\n # NOTE: There is a chance that there are multiple metric filters since the\n # describe_metric_filters uses a name prefix\n for m_f in metric_filters_response:\n metric_filters = [\n m_f\n for m_t in m_f[\"metricTransformations\"]\n if m_t[\"metricName\"] == metric_name and m_t[\"metricNamespace\"] == metric_namespace\n ]\n if metric_filters:\n break\n\n if not metric_filters:\n raise ValueError(\n \"failed to find existing metric filter with \"\n f\"logGroupName: [{log_group_name}], \"\n f\"filterNamePrefix: [{filter_name_prefix}], \"\n f\"metricName: [{metric_name}], \"\n f\"metricNamespace: [{metric_namespace}]\"\n )\n\n metric_filter_properties = [\n \"filterName\",\n \"filterPattern\",\n \"logGroupName\",\n \"metricTransformations\",\n ]\n # only return the properties that are needed for the put_metric_filter call\n return {k: v for k, v in metric_filters[0].items() if k in metric_filter_properties}",
"def get_key(cls, obj, query):\n\n if hasattr(obj, 'config'):\n for item in obj.config.hardware.device:\n if query in item.deviceInfo.label:\n key = item.key\n controller_key = item.controllerKey\n\n return (key, controller_key)",
"def get_filters(self):",
"def mapping_validator_key(mapping):\n return (mapping.instrument + \"_all_ld.tpn\", mapping.name)"
]
| [
"0.7208164",
"0.68630666",
"0.5900287",
"0.58664966",
"0.5773699",
"0.57675856",
"0.57655513",
"0.5757507",
"0.57541454",
"0.5703139",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5664727",
"0.5615112",
"0.5581076",
"0.557863",
"0.5569578",
"0.55463725",
"0.55463725",
"0.5514206",
"0.5503072",
"0.5499613",
"0.5476507",
"0.5456935"
]
| 0.8068788 | 0 |
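
Rounding out the scheme: the clean_filter_value document in the next row (with its str(bool) slip corrected to str(filter_value)) feeds the make_key negative shown in the previous row — boolean and None filter values are normalized to stable strings before being embedded in a key. A sketch of the two composed, under the same plain-str assumption:

    def clean_filter_value(filter_value):
        # bool and None lack a canonical wire form, so pin them to str() here.
        if isinstance(filter_value, bool) or filter_value is None:
            return str(filter_value)
        return filter_value

    def make_key(project, name, period, filters=None):
        parts = [project, name, period]
        if isinstance(filters, dict):
            filters_part = "/".join(
                "{0}|{1}".format(f, clean_filter_value(filters[f]))
                for f in sorted(filters) if f
            )
            if filters_part:
                parts.append(filters_part)
        return ";".join(parts)

    assert make_key("proj", "hits", "day", {"ok": True, "region": None}) == "proj;hits;day;ok|True/region|None"
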
Convert filter value to string object | def clean_filter_value(self, filter_value):
if isinstance(filter_value, bool) or isinstance(filter_value, NoneType):
            return str(filter_value)  # normalize bool/None to a stable string form
return filter_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _filterDictToStr(self, filterDict):\n values = []\n for key, vals in filterDict.items():\n if key not in ('contentRating', 'label', 'contentRating!', 'label!'):\n raise BadRequest(f'Unknown filter key: {key}')\n values.append(f\"{key}={'%2C'.join(vals)}\")\n return '|'.join(values)",
"def make_filter_string(cls, filter_specification):\n registry = get_current_registry()\n visitor_cls = registry.getUtility(IFilterSpecificationVisitor,\n name=EXPRESSION_KINDS.CQL)\n visitor = visitor_cls()\n filter_specification.accept(visitor)\n return str(visitor.expression)",
"def terraform_output_filter(filter, payload):\n if filter in payload:\n return payload[filter]['value']\n else:\n return None",
"def valueToString():",
"def get_filter(self) -> str:\n\n return \";;\".join(self.filters)",
"def metadata_filter_as_string(metadata_filter):\n if not isinstance(metadata_filter, dict):\n return metadata_filter\n\n additional = metadata_filter.get('additional', [])\n if additional == 'all':\n entries = ['all']\n else:\n entries = [key for key in additional if key not in _JUPYTEXT_CELL_METADATA]\n\n excluded = metadata_filter.get('excluded', [])\n if excluded == 'all':\n entries.append('-all')\n else:\n entries.extend(['-' + e for e in excluded])\n\n return ','.join(entries)",
"def to_string(self):\n filter_string = '('\n variable_length = len(self.values) - 1\n for index in range(0, variable_length + 1):\n filter_string += '&' + self.values[index]\n # append or for all but the last\n if index < variable_length:\n filter_string += self.combination_operator\n filter_string += ')'\n\n return filter_string",
"def get_filter_name(self):\n pass",
"def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")",
"def filter(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"filter\")",
"def __repr__(self):\n return str(self._filtered_items)",
"def transform_python(self, value):\n return str(value)",
"def filter(self, value, model=None, context=None):\n\n # string filter: skip non-strings\n if type(value) is not str:\n return value\n\n linker = Linker(**self.linkify_params)\n return linker.linkify(value)",
"def to_string(self, name, value):\r\n \r\n return str(value)",
"def get_filters_values_key(self, project, metric_name, f):\n return u\"{0}-metrics-filter-values:{1}:{2}\".format(project,\n to_unicode(metric_name),\n to_unicode(f))",
"def __str__(self):\n return self._status(True, \"Filter Settings\", True, True)",
"def test_str(self):\n\t\tself.filter.set_operator('.match')\n\t\tself.filter.set_limit(\"test\")\n\t\tself.assertTrue(str(self.filter), \"String conversion failed!\")",
"def subst(self, value, filter=None):\n\n if isinstance(value, Literal):\n return value._value\n elif isinstance(value, tuple):\n return tuple(self.subst(i, filter) for i in value)\n elif isinstance(value, list):\n return list(self.subst(i, filter) for i in value)\n elif isinstance(value, dict):\n return {i: self.subst(value[i], filter) for i in value}\n elif isinstance(value, StringTypes):\n def subfn(mo):\n var = mo.group(0)\n\n if var == \"$$\":\n return \"$\"\n\n # Apply variable filters\n parts = var[2:-1].split(\"|\")\n value = self.evaluate(parts[0])\n\n if len(parts) > 1:\n # Filters supplied directly\n for part in parts[1:]:\n if len(part) == 0:\n # Empty filter can be used to disable auto filter\n continue\n else:\n value = self.callfilter(part, value)\n elif filter:\n # Use auto-filter if specified\n for part in filter.split(\"|\"):\n value = self.callfilter(part, value)\n\n return value\n return re.sub(r\"\\$\\$|\\$\\(.*?\\)\", subfn, value)\n else:\n return value",
"def getparameters(filter,title = \"\"):\n vardic = filter.__dict__\n for i in list(vardic.keys()):\n if vardic[i] is not None:\n title += \" \"+i+\": \"\n title += str(vardic[i])+\",\"\n return title[:-1]",
"def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)",
"def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)",
"def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)",
"def url_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n return quote(str(val))",
"def filter(self) -> Optional[str]:\n return pulumi.get(self, \"filter\")",
"def parse(value: str):\n return [member for member in FilterMode if member.name == value][0]",
"def get_current_filters(self) -> str:\r\n return self.__filters_string",
"def as_string (self) :\n\n if self.is_machinetag() :\n return \"%s:%s=%s\" % (self.namespace(), self.predicate(), self.value())",
"def filter(self, value, model=None, context=None):\n\n # string filter: skip non-strings\n if type(value) is not str:\n return value\n\n if self.mode == 'left':\n return value.lstrip(self.chars)\n elif self.mode == 'right':\n return value.rstrip(self.chars)\n else:\n return value.strip(self.chars)",
"def value_to_string(self, obj):\n value = self.value_from_object(obj)\n return value",
"def normalizeFilter(_filter):\n _filter = '' if _filter == None else _filter\n\n _filter = _filter.rstrip(' ')\n _filter = '%' if _filter == '' else _filter\n _filter = _filter.replace('*','%')\n _filter = _filter.replace('?','_')\n\n return _filter"
]
| [
"0.7072729",
"0.6677008",
"0.63663065",
"0.63333833",
"0.6180153",
"0.6167613",
"0.6098829",
"0.60581315",
"0.60311866",
"0.60164213",
"0.6016281",
"0.60055685",
"0.5987304",
"0.5948737",
"0.5947506",
"0.58897954",
"0.5842242",
"0.5801041",
"0.5799115",
"0.5799011",
"0.5799011",
"0.5799011",
"0.5764558",
"0.57491165",
"0.57201815",
"0.57177293",
"0.57081056",
"0.569582",
"0.5682439",
"0.56820744"
]
| 0.7261331 | 0 |
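A minimal usage sketch for the clean_filter_value document above. The FilterCleaner wrapper class and the sample values are assumptions added for illustration; they are not part of the original record.

    class FilterCleaner:
        def clean_filter_value(self, filter_value):
            # Booleans and None are coerced to their string form so they can
            # be embedded safely in a serialized filter; other values pass
            # through unchanged.
            if isinstance(filter_value, (bool, type(None))):
                return str(filter_value)
            return filter_value

    cleaner = FilterCleaner()
    print(cleaner.clean_filter_value(True))    # "True"
    print(cleaner.clean_filter_value(None))    # "None"
    print(cleaner.clean_filter_value("open"))  # "open" (unchanged)
    print(cleaner.clean_filter_value(42))      # 42 (unchanged)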
Run a Kafka consumer and apply process_func to incoming messages. | def consume_messages(process_func: Callable[[str], None]):
consumer = get_consumer()
for message in consumer:
log.debug(f'Received a message: {message}')
try:
process_func(message.value)
except Exception as e:
log.error(f'Failed to process a message: {message.value}')
log.exception(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listen(self, handler):\n try:\n logger.info('Listening on topic: {}'.format(self.topic))\n consumer = KafkaConsumer(self.topic)\n\n for msg in consumer:\n object_dict = self._extract_updated_object(msg)\n if object_dict:\n handler(object_dict)\n\n except Exception as ex:\n if isinstance(ex, KafkaError):\n logger.error('Error with Kafka: {}'.format(ex))",
"def kafka_consumer(kafka_consumer_factory):\n return kafka_consumer_factory()",
"def main(host, port, debug=False, max_iters=None, only_proc=False, bulk=False):\n loop = asyncio.get_event_loop()\n asyncio.run(consumer(host, port, max_iters, only_proc, loop, bulk))",
"def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)",
"def consumer(pipeline_name, consumer_name, event_name, worker_id):\n processRules = pkg.components.processComponentMap[\"rules\"][consumer_name]\n pipeline = pkg.components.processComponentMap[pipeline_name]\n event = pkg.components.processComponentMap[event_name]\n componentMap = {}\n while not event.is_set() or not pipeline.empty():\n log.info(f\"Starting consumer {consumer_name}-{worker_id}\")\n for rule in processRules[\"extensionList\"].values():\n extension_map[rule[\"extension\"]].process(function=rule[\"operation\"], componentMap=componentMap, **rule)\n log.info(f\"{consumer_name}-{worker_id} received EXIT event. Exiting\")",
"def create_consumer(self, topic_id: str, callback: Callable, gcp_subscription_id:str=None):\n backend = None\n if self.vendor == 'kafka':\n backend = KafkaClient(topic_id, self.configs['kafka_servers'])\n Consumer(backend, callback)\n else:\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n subscription_id = gcp_subscription_id\n backend = GooglePubSubClient(project_id=project_id, topic=topic_id,\n subscription_id=subscription_id, gcp_configs=self.configs, callback=callback)\n runner_thread = Thread(target=runner)\n runner_thread.start()",
"def run(self, item_callback=None):\n self.item_callback = item_callback\n logger.info('Starting consumer. Use CTRL+C to stop.')\n while self.shards:\n # time.sleep(0.5)\n for shard in self.shards:\n shard_id = shard['ShardId']\n shard_iterator = self.get_iterator(shard)\n self.process_records(shard_iterator, shard_id)",
"def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)",
"def run_consume(config_file):\n config = init_config(config_file)\n # juniper_conf\n juniper_conf_data = config.get('data', 'juniper_conf')\n remove_expired_juniper_conf = config.getboolean('delete_data', 'juniper_conf')\n juniper_conf_data_age = config.get('data_age', 'juniper_conf')\n # nmap services\n nmap_services_py_data = config.get('data', 'nmap_services_py')\n # nagios checkmk\n nagios_checkmk_data = config.get('data', 'nagios_checkmk')\n # cfengine report\n cfengine_data = config.get('data', 'cfengine_report')\n # noclook\n noclook_data = config.get('data', 'noclook')\n # Consume data\n if juniper_conf_data:\n data = load_json(juniper_conf_data)\n noclook_juniper_consumer.consume_juniper_conf(data)\n if nmap_services_py_data:\n data = load_json(nmap_services_py_data)\n noclook_nmap_consumer.insert_nmap(data)\n if nagios_checkmk_data:\n data = load_json(nagios_checkmk_data)\n noclook_checkmk_consumer.insert(data)\n if cfengine_data:\n data = load_json(cfengine_data)\n noclook_cfengine_consumer.insert(data)\n if noclook_data:\n data = load_json(noclook_data)\n consume_noclook(data)\n # Clean up expired data\n if remove_expired_juniper_conf:\n noclook_juniper_consumer.remove_juniper_conf(juniper_conf_data_age)",
"def kafka_payment_consumer_worker(mq: queue.Queue):\n global app_config\n\n # Client\n consumer = KafkaConsumer('payment',\n bootstrap_servers=bootstrap_servers,\n value_deserializer=lambda item: json.loads(item.decode('utf-8')))\n\n\n\n while not t_stop_event.is_set():\n try:\n # Message loop\n for message in consumer:\n logging.info(\"READING MESSAGE %s:%d:%d: key=%s value=%s\" % (\n message.topic,\n message.partition,\n message.offset,\n message.key,\n message.value)\n )\n\n # simple sanitizer\n if (\"action\" not in message.value) \\\n or (\"message\" not in message.value) \\\n or (\"request\" not in message.value[\"message\"]):\n logging.info(\"MALFORMED MESSAGE value=%s SKIPPING\" % (message.value,))\n continue\n\n # Action switch\n if str(message.value[\"action\"]).upper() == \"NOTIFY_DELIVERY_RESPONSE\":\n logging.info(\"MESSAGE <NOTIFY_DELIVERY_RESPONSE> RECEIVE\") # Mocked\n \"\"\"logging.info(\"PUT credit_deliverer MESSAGE in QUEUE\")\n mq.put(\n credit_deliverer()\n )\"\"\"\n except Exception as e:\n logging.fatal(e, exc_info=True)\n # Post routine\n\n consumer.close()\n return",
"def _mp_consume(client, group, topic, queue, size, events, **consumer_options):\n\n # Initial interval for retries in seconds.\n interval = 1\n while not events.exit.is_set():\n try:\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None,\n **consumer_options)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n events.start.wait()\n\n # If we are asked to quit, do so\n if events.exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n while True:\n try:\n queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)\n break\n except queue.Full:\n if events.exit.is_set():\n break\n\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n events.pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()\n\n except KafkaError as e:\n # Retry with exponential backoff\n log.error(\n \"Problem communicating with Kafka (%s), retrying in %d seconds...\" % (e, interval))\n time.sleep(interval)\n interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS",
"def start_loop(\n consumer: Consumer,\n message_handler: Callable[[Message], None],\n on_success: Callable[[Message], None] = lambda msg: None,\n on_failure: Callable[[Message, Exception], None] = lambda msg, e: None,\n on_config_update: Callable[[], None] = lambda: None,\n logger: logging.Logger = logging.getLogger('IR')):\n # Used for re-fetching the configuration with a throttle\n last_updated_minute = int(time.time() / 60)\n if not config()['global_config_url']:\n config_tag = _fetch_latest_config_tag()\n\n while True:\n msg = consumer.poll(timeout=0.5)\n if msg is None:\n continue\n curr_min = int(time.time() / 60)\n if not config()['global_config_url'] and curr_min > last_updated_minute:\n # Check for configuration updates\n latest_config_tag = _fetch_latest_config_tag()\n last_updated_minute = curr_min\n if config_tag is not None and latest_config_tag != config_tag:\n config(force_reload=True)\n config_tag = latest_config_tag\n on_config_update()\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n logger.info('End of stream.')\n else:\n logger.error(f\"Kafka message error: {msg.error()}\")\n continue\n val = msg.value().decode('utf-8')\n try:\n msg = json.loads(val)\n except ValueError as err:\n logger.error(f'JSON parsing error: {err}')\n logger.error(f'Message content: {val}')\n consumer.commit()\n continue\n logger.info(f'Received event: {msg}')\n start = time.time()\n try:\n message_handler(msg)\n # Move the offset for our partition\n consumer.commit()\n on_success(msg)\n logger.info(f\"Handled {msg['evtype']} message in {time.time() - start}s\")\n except Exception as err:\n logger.error(f'Error processing message: {err.__class__.__name__} {err}')\n logger.error(traceback.format_exc())\n # Save this error and message to a topic in Elasticsearch\n on_failure(msg, err)",
"async def consumer_loop(\n uri: str,\n topic: str,\n ssl_context: SSLContext,\n writers: List[Callable[[ConsumerPayload], Awaitable[int]]],\n) -> int:\n log.info(\"consumer: starting\")\n if len(writers) < 1:\n raise ValueError(\"there must be at least one writer passed to consumer_loop.\")\n queue: asyncio.Queue[ConsumerPayload] = asyncio.Queue()\n async with AIOKafkaConsumer(\n topic,\n bootstrap_servers=uri,\n security_protocol=\"SSL\",\n ssl_context=ssl_context,\n group_id=DEFAULT_GROUP_ID,\n ) as consumer:\n await asyncio.gather(\n decoder(queue, consumer), writer_wrapper(queue=queue, writers=writers)\n )\n log.info(\"consumer: exiting\")\n return 0",
"def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)",
"def start_consuming(self):\n # LOGGER.info('Issuing consumer related RPC commands')\n if self._init_ok_ctrl and self._init_ok_task:\n self._channel_ctrl.add_on_cancel_callback(self.on_consumer_ctrl_cancelled)\n self._channel_task.add_on_cancel_callback(self.on_consumer_task_cancelled)\n self._consumer_tag_task = self._channel_task.basic_consume(\n self.queue_task,\n auto_ack=False,\n on_message_callback=self.on_message\n )\n self._consumer_tag_ctrl = self._channel_ctrl.basic_consume(\n self._topic_queue_name,\n auto_ack=False,\n on_message_callback=self.on_topic\n )\n self.was_consuming = True\n self._consuming = True",
"def consumer(self, consumer):\n self._consumer = consumer",
"def receive_procedure(channel_to_receive, queue_name=DEFAULT_QUEUE_NAME):\n # Signal handler to exit from function\n signal.signal(signal.SIGINT, signal_handler)\n\n # Start consuming\n channel_to_receive.basic_consume(callback, queue=queue_name, no_ack=True)\n channel_to_receive.start_consuming()",
"def consume(self):\n LOGGER.debug('Consumer Initialized')\n # self.connect()\n channel = self.get_channel()\n self._bind_things(channel)\n\n try:\n LOGGER.info('Start consuming')\n channel.start_consuming()\n except ConnectionClosed:\n LOGGER.exception('Pika connection closed detected. Will attempt to start consuming again')\n self.consume()\n except KeyboardInterrupt as e:\n LOGGER.info('Keyboard interrupt, stop consuming')\n self.shutdown()\n raise e\n except Exception as e:\n LOGGER.exception(\"'%s\" % str(e))\n self.shutdown()\n if self.settings.CONSUMER['RAISE_EXCEPTION']:\n LOGGER.info(\"CONSUMER RAISED EXCEPTION\")\n raise e",
"def _mp_consume(client, group, topic, chunk, queue, start, exit, pause, size):\n\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n partitions=chunk,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n start.wait()\n\n # If we are asked to quit, do so\n if exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n queue.put(message)\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()",
"def feed(self, handle, consumer, do_features=...): # -> bool:\n ...",
"async def consumer(message):\n # TODO\n print(message)",
"def subscribeConsumer(consumer):",
"def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))",
"def consumer(self, no_ack=False, prefetch=100, priority=None):\n if prefetch is not None:\n self.channel.prefetch_count(prefetch)\n self.channel._consume(self, no_ack, priority)\n self.consuming = True\n yield Consumer(self)",
"def consume(self, handler):\n bounded_handler = partial(handler, self)\n self._consume_handler = handler\n self.log.debug(\"Start consuming\")\n self._channel.add_on_close_callback(\n self.on_channel_closed\n )\n self._consumer_tag = self._channel.basic_consume(bounded_handler,\n self.name)\n self.log.debug(\"Consumer tag %s on CHANNEL%i\",\n self._consumer_tag, self._channel.channel_number)",
"def kafka_consumer(request: 'SubRequest') -> KafkaConsumer:\n _, kafka_port = request.getfixturevalue(kafka_fixture_name)\n\n used_consumer_kwargs = consumer_kwargs.copy()\n used_consumer_kwargs.setdefault('consumer_timeout_ms', DEFAULT_CONSUMER_TIMEOUT_MS)\n used_consumer_kwargs.setdefault('bootstrap_servers', 'localhost:{}'.format(kafka_port))\n\n consumer = KafkaConsumer(\n *kafka_topics,\n **used_consumer_kwargs,\n )\n\n if seek_to_beginning:\n assert kafka_topics, (\n 'In order to be able to seek to beginning, we must have some partitions assigned '\n 'for which we need to subscribe to topics.')\n\n def partitions_assigned():\n consumer.poll(timeout_ms=20)\n return len(consumer.assignment()) > 0\n\n _wait_until(partitions_assigned)\n\n consumer.seek_to_beginning()\n return consumer",
"def consume(\n self,\n consume_function: Callable[[int, tf.keras.Model], T],\n num_processes: int = None,\n context: Callable[[int], EnsembleContextManager] = None,\n models: Iterable[int] = None,\n ) -> List[T]:\n return self._run_in_processes(\n process_creator=_model_consuming_process,\n inner_function=consume_function,\n num_processes=num_processes,\n context=context,\n models=models,\n )",
"def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()",
"def kafka_setup(self):\n # To consume latest messages and auto-commit offsets\n consumer = KafkaConsumer('test-topic',\n group_id='test-consumer',\n bootstrap_servers=['kafka:9092'])\n\n filename = \"/home/debianml/idsFinal/modelo_ultimo_newfeatures.sav\"\n model = pickle.load(open(filename, 'rb'))\n\n for message in consumer:\n self.hacer_prediccion(model, pickle.loads(message.value))\n\n # consume earliest available messages, don't commit offsets\n KafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)\n\n # StopIteration if no message after 1sec\n KafkaConsumer(consumer_timeout_ms=1000)",
"def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()"
]
| [
"0.62038803",
"0.5999372",
"0.59506476",
"0.587573",
"0.58691865",
"0.5739204",
"0.5648513",
"0.56234175",
"0.55911964",
"0.5572861",
"0.55719227",
"0.55717945",
"0.5530835",
"0.55171555",
"0.55148387",
"0.54914945",
"0.5488683",
"0.54419833",
"0.542208",
"0.5419174",
"0.5410294",
"0.53834164",
"0.5368067",
"0.53297305",
"0.53287",
"0.5318945",
"0.52683014",
"0.52526146",
"0.5240926",
"0.5232508"
]
| 0.7023197 | 0 |
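A minimal sketch of how consume_messages above could be wired up. get_consumer and log are not defined in the record, so the kafka-python-based stand-in, the topic name, and the broker address here are all assumptions.

    import json
    import logging

    from kafka import KafkaConsumer  # pip install kafka-python

    log = logging.getLogger(__name__)

    def get_consumer() -> KafkaConsumer:
        # Hypothetical topic and broker address; replace with real values.
        return KafkaConsumer(
            "events",
            bootstrap_servers="localhost:9092",
            value_deserializer=lambda raw: raw.decode("utf-8"),
        )

    def handle_event(message: str) -> None:
        # A malformed payload raises here and is logged by the consume loop.
        payload = json.loads(message)
        log.info("handled event: %s", payload)

    # consume_messages(handle_event)  # blocks forever, one message at a time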
Set up an Arlo media player. | async def async_setup_platform(hass, _config, async_add_entities, _discovery_info=None):
arlo = hass.data.get(COMPONENT_DATA)
if not arlo:
return
players = []
for camera in arlo.cameras:
if camera.has_capability(MEDIA_PLAYER_KEY):
name = "{0}".format(camera.name)
players.append(ArloMediaPlayer(name, camera))
async_add_entities(players) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def OnPlay(self):\r\n # check if there is a file to play, otherwise open a\r\n # Tk.FileDialog to select a file\r\n print(\"1-1\")\r\n\r\n\r\n self.Media = self.Instance.media_new(self.youtube_url)\r\n self.player.set_media(self.Media)\r\n\r\n # set the window id where to render VLC's video output\r\n if platform.system() == 'Windows':\r\n print(\"1-3\")\r\n self.player.set_hwnd(self.GetHandle())\r\n else:\r\n print(\"1-4\")\r\n self.player.set_xwindow(self.GetHandle()) # this line messes up windows\r\n # FIXME: this should be made cross-platform\r\n\r\n # Try to launch the media, if this fails display an error message\r\n if self.player.play() == -1:\r\n print(\"1-6\")\r\n self.errorDialog(\"Unable to play.\")",
"def setUp(self):\n self.player = Player()",
"def __init__(self, name, device):\n self._name = name\n self._unique_id = device.entity_id\n\n self._device = device\n self._name = name\n self._volume = None\n self._muted = None\n self._state = None\n self._shuffle = None\n self._position = 0\n self._track_id = None\n self._playlist = []\n\n _LOGGER.info(\"ArloMediaPlayer: %s created\", self._name)",
"def __init__(self, settings):\n super().__init__(settings, self.player_info_url, Player)",
"def start_soundtrack(self):\n sources = screens['Intro']['music']\n self.source = choice(sources)\n Logger.info('Chose \"{}\" as the intro music.'.format(self.source))\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)",
"def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True",
"def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())",
"async def async_media_play(self) -> None:\n await self._projector.send_command(PLAY)",
"def prepare_media(self):\n start=self.get_pref('starttime')\n stop=self.get_pref('stoptime')\n## volume=limit(self.get_pref('volume'),0,200) # use limit() to ensure that volume pref is sensible...\n m=self.instance.media_new(self.mrl())\n# NONE of the following options work\n# m=self.instance.media_new(self.mrl(),'gain=0.2')\n## m=self.instance.media_new(self.mrl(),'sout-raop-volume=%s' % volume)\n# m=self.instance.media_new(self.mrl(),'audio-replay-gain-mode=track','--audio-replay-gain-default=0.2')\n if start:\n m.add_options('start-time=%s' % start) \n if stop:\n m.add_options('stop-time=%s' % stop) \n# the following test code DOES NOT WORK, though it does in cvlc at the command line, eg > cvlc my.mp3 --gain=0.2\n# gain=\"1.5\"\n# print \"SETTING GAIN for %s at %s%%\" % (self.uid,gain)\n# m.add_option('gain=%s' % gain)\n return m",
"def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)",
"def __init__(self, vlc_player, drone_gui):\n QMainWindow.__init__(self)\n self.setWindowTitle(\"VLC Drone Video Player\")\n\n # save the media player\n self.mediaplayer = vlc_player\n\n # need a reference to the main drone vision class\n self.drone_vision = drone_gui\n\n # create the GUI\n self.createUI()",
"def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)",
"def __init__(self, config, core):\n logger.debug(\n \"__init__(%s, %s)\",\n config, core)\n\n super(AutoplayFrontend, self).__init__()\n self.core = core\n self.config = config[Extension.ext_name]\n\n # State file\n data_dir = Extension.get_data_dir(config)\n self.statefile = pathlib.Path(data_dir) / 'autoplay.state'\n logger.debug(\n \"Use '%s' as statefile.\",\n self.statefile)",
"async def setup_embed(self):\n\n # init\n embed = Embed()\n embed.colour = 0xF54719\n\n # setting up\n if(self.title != None):\n embed.title = self.title\n \n if(self.description != None):\n embed.description = self.description\n \n if(self.colour != None):\n embed.colour = self.colour\n \n if(self.footer != None):\n embed.set_footer(text = self.footer, icon_url = self.client.user.avatar_url)\n \n else:\n embed.set_footer(text = f\"v{Bot_config.version} - {Bot_config.phase} | Credit : DrLarck & DrMegas\", icon_url = self.client.user.avatar_url)\n \n if(self.thumb != None):\n embed.set_thumbnail(url = self.thumb)\n\n embed.set_author(name = self.client.user.name, icon_url = self.client.user.avatar_url)\n \n return(embed)",
"def setup(self, registry):\n logger.info(\"%s %s\", self.dist_name, self.version)\n\n from .frontend import AutoplayFrontend\n registry.add(\"frontend\", AutoplayFrontend)",
"def play(self):\n self.playing = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?",
"def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen",
"def mediaConnection(self):\n\n self.mediaPlayer.setVideoOutput(self.videowidget)\n\n # media player signals\n self.mediaPlayer.stateChanged.connect(self.mediastate_changed)\n self.mediaPlayer.positionChanged.connect(self.position_changed)\n self.mediaPlayer.durationChanged.connect(self.duration_changed)",
"def play(self):\n pass",
"def __init__(self, handler: mediaHandler):\n\n self.handler = handler",
"async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:\n hass.data[DOMAIN] = {}\n websocket_api.async_register_command(hass, websocket_browse_media)\n websocket_api.async_register_command(hass, websocket_resolve_media)\n frontend.async_register_built_in_panel(\n hass, \"media-browser\", \"media_browser\", \"hass:play-box-multiple\"\n )\n local_source.async_setup(hass)\n await async_process_integration_platforms(\n hass, DOMAIN, _process_media_source_platform\n )\n return True",
"async def async_media_play(self) -> None:\n await self._volumio.play()",
"def __init__(self, servicename):\n bus = pydbus.SessionBus()\n self.name = servicename\n self._proxy = bus.get(self.name, '/org/mpris/MediaPlayer2')\n self.player = self._proxy[self.player_interface]\n self.properties = self._proxy[self.properties_interface]\n # tracklist is an optional interface\n try:\n self.tracklist = self._proxy[self.tracklist_interface]\n except KeyError:\n self.tracklist = None\n # playlists is an optional interface\n try:\n self.playlists = self._proxy[self.playlists_interface]\n except KeyError:\n self.playlists = None",
"def setup(bot: Bot) -> None:\n bot.add_cog(Armory(bot))",
"async def set_player(self, player: Optional[andesite.Player]) -> None:\n ...",
"def play(self, *args, **kwargs) -> None:\n raise NotImplementedError()",
"def play(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())",
"def load(self):\n Logger.info(\"VLCPlayer: Entering load\")\n self._load_player(self.source)\n self._set_volume(self.volume)",
"async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n entities = [KaleidescapeMediaPlayer(hass.data[KALEIDESCAPE_DOMAIN][entry.entry_id])]\n async_add_entities(entities)",
"def setup(self):\n # Set up the player\n self.player_sprite = arcade.Sprite(\"Sprites/Jugador/Jugador.jpg\", SPRITE_SCALING)\n self.player_sprite.center_x = 100\n self.player_sprite.center_y = 100\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n # Listado de habitaciones\n self.rooms = []\n self.rooms.append(setup_pueblo())\n\n #Contador de habitación\n self.current_room = 0\n\n #Fisicas\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)"
]
| [
"0.6124019",
"0.6033745",
"0.57720435",
"0.5672428",
"0.5659317",
"0.56030464",
"0.55808127",
"0.5578291",
"0.5571156",
"0.5568045",
"0.5557043",
"0.55538464",
"0.55161655",
"0.5514134",
"0.55140793",
"0.5498919",
"0.54745305",
"0.54496276",
"0.5445883",
"0.54299134",
"0.5420303",
"0.54156667",
"0.53744406",
"0.5367684",
"0.53601557",
"0.5349447",
"0.53464866",
"0.5336265",
"0.533314",
"0.5328958"
]
| 0.642649 | 0 |
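The setup coroutine above creates a player only for cameras that advertise the media-player capability. A self-contained illustration of that filtering pattern; StubCamera and the MEDIA_PLAYER_KEY value are invented for this sketch.

    MEDIA_PLAYER_KEY = "mediaPlayer"  # assumed value; the record imports the real constant

    class StubCamera:
        def __init__(self, name, capabilities):
            self.name = name
            self._capabilities = set(capabilities)

        def has_capability(self, key):
            return key in self._capabilities

    cameras = [
        StubCamera("front_door", {MEDIA_PLAYER_KEY}),
        StubCamera("garage", set()),  # no speaker, so no media player entity
    ]
    players = [c.name for c in cameras if c.has_capability(MEDIA_PLAYER_KEY)]
    print(players)  # ['front_door']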
Initialize an Arlo media player. | def __init__(self, name, device):
self._name = name
self._unique_id = device.entity_id
self._device = device
self._volume = None
self._muted = None
self._state = None
self._shuffle = None
self._position = 0
self._track_id = None
self._playlist = []
_LOGGER.info("ArloMediaPlayer: %s created", self._name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_video(self):\n\n assert self.container is None\n\n retry = 3\n while self.container is None and 0 < retry:\n retry -= 1\n try:\n self.container = av.open(self.tello.get_video_stream())\n except av.AVError as ave:\n print(ave)\n print('retry...')\n\n\n assert self.container is not None",
"def __init__(self, handler: mediaHandler):\n\n self.handler = handler",
"def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)",
"def __init__(self):\n\t\tself._logger = None\n\t\tself._instanciate_logger()\n\t\tself._video_manager = VideoManager(self, self._logger)\n\t\tself._video_thread = None\n\t\tself._audio_manager = AudioManager(self, self._logger)\n\t\tself._audio_thread = None\n\t\tself._input_thread = None\n\t\tself._trigger_manager = None\n\t\tself.is_running = False",
"def __init__(self, player):\n\t\tself.player = player",
"def OnPlay(self):\r\n # check if there is a file to play, otherwise open a\r\n # Tk.FileDialog to select a file\r\n print(\"1-1\")\r\n\r\n\r\n self.Media = self.Instance.media_new(self.youtube_url)\r\n self.player.set_media(self.Media)\r\n\r\n # set the window id where to render VLC's video output\r\n if platform.system() == 'Windows':\r\n print(\"1-3\")\r\n self.player.set_hwnd(self.GetHandle())\r\n else:\r\n print(\"1-4\")\r\n self.player.set_xwindow(self.GetHandle()) # this line messes up windows\r\n # FIXME: this should be made cross-platform\r\n\r\n # Try to launch the media, if this fails display an error message\r\n if self.player.play() == -1:\r\n print(\"1-6\")\r\n self.errorDialog(\"Unable to play.\")",
"def __init__(self, session_type):\n\n self._media_session = MediaSession(session_type)\n\n self._signals = []\n self._signals.append(self._media_session.connect(\"prepared\",\n self.on_media_session_prepared))\n self._signals.append(self._media_session.connect(\"ready\",\n self.on_media_session_ready))",
"def __init__(self, player):\n self.player = player",
"def __init__(self):\n super().__init__(interface.Audio, DEFAULT_PRIORITIES)",
"def __init__(self, settings):\n super().__init__(settings, self.player_info_url, Player)",
"def prepare_media(self):\n start=self.get_pref('starttime')\n stop=self.get_pref('stoptime')\n## volume=limit(self.get_pref('volume'),0,200) # use limit() to ensure that volume pref is sensible...\n m=self.instance.media_new(self.mrl())\n# NONE of the following options work\n# m=self.instance.media_new(self.mrl(),'gain=0.2')\n## m=self.instance.media_new(self.mrl(),'sout-raop-volume=%s' % volume)\n# m=self.instance.media_new(self.mrl(),'audio-replay-gain-mode=track','--audio-replay-gain-default=0.2')\n if start:\n m.add_options('start-time=%s' % start) \n if stop:\n m.add_options('stop-time=%s' % stop) \n# the following test code DOES NOT WORK, though it does in cvlc at the command line, eg > cvlc my.mp3 --gain=0.2\n# gain=\"1.5\"\n# print \"SETTING GAIN for %s at %s%%\" % (self.uid,gain)\n# m.add_option('gain=%s' % gain)\n return m",
"async def async_setup_platform(hass, _config, async_add_entities, _discovery_info=None):\n arlo = hass.data.get(COMPONENT_DATA)\n if not arlo:\n return\n\n players = []\n for camera in arlo.cameras:\n if camera.has_capability(MEDIA_PLAYER_KEY):\n name = \"{0}\".format(camera.name)\n players.append(ArloMediaPlayer(name, camera))\n\n async_add_entities(players)",
"def __init__(self, config, core):\n logger.debug(\n \"__init__(%s, %s)\",\n config, core)\n\n super(AutoplayFrontend, self).__init__()\n self.core = core\n self.config = config[Extension.ext_name]\n\n # State file\n data_dir = Extension.get_data_dir(config)\n self.statefile = pathlib.Path(data_dir) / 'autoplay.state'\n logger.debug(\n \"Use '%s' as statefile.\",\n self.statefile)",
"def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen",
"def __init__(self, **kwargs):\n self.songs = SongList()\n self.song = Song()\n self.songs.load_songs(FILE_NAME)\n super(SongsToLearnApp, self).__init__(**kwargs)",
"def __init__(self, vlc_player, drone_gui):\n QMainWindow.__init__(self)\n self.setWindowTitle(\"VLC Drone Video Player\")\n\n # save the media player\n self.mediaplayer = vlc_player\n\n # need a reference to the main drone vision class\n self.drone_vision = drone_gui\n\n # create the GUI\n self.createUI()",
"def _load_player(self, filename):\n self._unload_player()\n\n Logger.info(\"VLCPlayer: Loading player\")\n SoundVLCPlayer.player = player = self.instance.media_player_new()\n media = player.set_mrl(filename)\n player.event_manager().event_attach(\n EventType.MediaPlayerEndReached, self._track_finished)\n media.parse() # Determine duration\n self._length = media.get_duration() / 1000.0\n media.release()",
"def load(self):\n Logger.info(\"VLCPlayer: Entering load\")\n self._load_player(self.source)\n self._set_volume(self.volume)",
"def __init__(self, *args, **kwargs):\n super(Player, self).__init__(*args, **kwargs)",
"def __init__(self, API, playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None",
"def __init__(self, servicename):\n bus = pydbus.SessionBus()\n self.name = servicename\n self._proxy = bus.get(self.name, '/org/mpris/MediaPlayer2')\n self.player = self._proxy[self.player_interface]\n self.properties = self._proxy[self.properties_interface]\n # tracklist is an optional interface\n try:\n self.tracklist = self._proxy[self.tracklist_interface]\n except KeyError:\n self.tracklist = None\n # playlists is an optional interface\n try:\n self.playlists = self._proxy[self.playlists_interface]\n except KeyError:\n self.playlists = None",
"def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")",
"def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")",
"def initAudio(self):\n\t\t# Initialize pitch detection\n\t\tself.listener = PitchDetect(channels=1)\n\t\tself.listener.listen()\n\t\tself.recording = False\n\t\tself.paused = False",
"def setUp(self):\n self.player = Player()",
"def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))",
"def __init__(self):\n self.type = 'Gstreamer'\n self.pulsesrc = Gst.ElementFactory.make(\"pulsesrc\", \"pulsesrc\")\n if self.pulsesrc is None:\n self._error(\"Error loading pulsesrc GST plugin. You probably need the gstreamer1.0-pulseaudio package\")\n\n self.audioconvert = Gst.ElementFactory.make(\"audioconvert\", \"audioconvert\")\n self.audioresample = Gst.ElementFactory.make(\"audioresample\", \"audioresample\")\n self.fakesink = Gst.ElementFactory.make(\"fakesink\", \"fakesink\")\n\n # Generate Gstreamer pipeline (from source to sink)\n self.pipeline = Gst.Pipeline()\n for element in [self.pulsesrc, self.audioconvert, self.audioresample, self.fakesink]:\n self.pipeline.add(element)\n self.pulsesrc.link(self.audioconvert)\n self.audioconvert.link(self.audioresample)",
"def __init__(self, quality: int = 7, bitrate: int = 64):\n self._output = mp3.Mp3(quality, bitrate)\n self._output.add_callback(self._enqueue)\n self._socket = None\n self._source = None\n self._endpoint = None\n self._password = None\n # Icecast doesn't actually support chunked encoding\n self._chunk = False",
"def __init__(self, connection):\n\t\tself.name = None\n\t\tself.remote_player_proxy = RemotePlayerProxy(connection)",
"def __init__(self, media, username, password):\n self.media = media\n self.username = username\n self.password = password"
]
| [
"0.63464123",
"0.6276088",
"0.6197421",
"0.60042477",
"0.5966629",
"0.5964369",
"0.59573257",
"0.5955853",
"0.5942402",
"0.59255",
"0.59211975",
"0.588976",
"0.58767784",
"0.58719885",
"0.5865433",
"0.5840495",
"0.5824255",
"0.581618",
"0.58082753",
"0.57726705",
"0.57574016",
"0.57477105",
"0.57477105",
"0.5705991",
"0.5695774",
"0.5682737",
"0.5663709",
"0.56472623",
"0.5625077",
"0.56019044"
]
| 0.659764 | 0 |
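The __init__ above only caches state; a typical next step is to expose those private attributes as read-only properties. A hedged sketch using a plain-Python stand-in, not the real Home Assistant MediaPlayerEntity base class.

    class MediaPlayerStub:
        def __init__(self, name, unique_id):
            self._name = name
            self._unique_id = unique_id
            self._muted = None

        @property
        def name(self):
            return self._name

        @property
        def unique_id(self):
            return self._unique_id

        @property
        def is_volume_muted(self):
            return self._muted

    player = MediaPlayerStub("Front Door", "arlo_front_door_player")
    print(player.name, player.unique_id)  # Front Door arlo_front_door_player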
Audit vault and strategy configuration. | def audit():
governance = web3.ens.resolve('ychad.eth')
registry = load_registry()
vaults = load_vaults(registry)
for v in vaults:
if v.vault.governance() != governance:
secho(f'{v.name} vault governance == {v.vault.governance()}', fg='red')
print(f'{v.vault}.setGovernance({governance})')
if v.strategy.governance() != governance:
secho(f'{v.name} strategy governance == {v.strategy.governance()}', fg='red')
print(f'{v.strategy}.setGovernance({governance})') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def setup(bot: DreamBot) -> None:\n\n await bot.add_cog(Audit(bot))\n bot_logger.info('Completed Setup for Cog: Audit')",
"def vault(self):",
"def configure(self, account_id):\n print(\"Configuring Vault\")\n client = self.connect(VAULT_TOKEN)\n\n # Audit Backend\n if 'syslog/' not in client.sys.list_enabled_audit_devices():\n audit_options = {\n 'log_raw': 'True',\n }\n client.sys.enable_audit_device('syslog', options=audit_options)\n else:\n print(\"audit_backend already created.\")\n\n # Policies\n policies = []\n path = os.path.join(POLICY_DIR, \"*.hcl\")\n for policy in glob.glob(path):\n name = os.path.basename(policy).split('.')[0]\n policies.append(name)\n with open(policy, 'r') as fh:\n client.sys.create_or_update_policy(name, fh.read())\n\n # AWS Authentication Backend\n # Enable AWS auth in Vault\n if 'aws/' not in client.sys.list_auth_methods():\n try:\n client.sys.enable_auth_method('aws')\n except Exception as e:\n raise VaultError(\"Error while enabling auth back end. {}\".format(e))\n else:\n print(\"aws auth backend already created.\")\n\n #Define policies and arn \n arn = 'arn:aws:iam::{}:instance-profile/'.format(account_id)\n\n #For each policy configure the policies on a role of the same name\n for policy in policies:\n client.create_ec2_role(policy,\n bound_iam_instance_profile_arn = arn + policy,\n policies = policy,\n mount_point = 'aws')\n print('Successful write to aws/role/' + policy)\n \n # AWS Secret Backend\n if 'aws/' not in client.sys.list_mounted_secrets_engines():\n try:\n client.sys.enable_secrets_engine('aws')\n except Exception as e:\n raise VaultError('Error while enabling secret back end. {}'.format(e))\n else:\n print(\"aws secret backend already created.\")\n\n path = os.path.join(POLICY_DIR, \"*.iam\")\n for iam in glob.glob(path):\n name = os.path.basename(iam).split('.')[0]\n with open(iam, 'r') as fh:\n # if we json parse the file first we can use the duplicate key trick for comments\n client.secrets.aws.create_or_update_role(name, 'iam_user', policy_document = fh.read())",
"def enable_audit_monitoring():\n __enable_data_access_logging()\n __enable_log_streaming()\n __create_audit_alerts()\n __get_incidents_history()",
"def __init__(__self__, *,\n audit_mode: Optional[pulumi.Input['WorkloadConfigAuditMode']] = None):\n if audit_mode is not None:\n pulumi.set(__self__, \"audit_mode\", audit_mode)",
"def __init__(self, client: Client, config_file: str = DEFAULT_CONFIG) -> None:\n self.logger.info(\"Initializing Audit module\")\n self.owner = os.getenv(\"BOT_OWNER\", \"\")\n self.config = ConfigFile()\n self.config.load(config_file)\n self.allow_list: List[str] = self.config.config.get(\"allow-list\", [])\n if not self.config.config:\n self.config.create(\"module\", self.MODULE_NAME)\n self.config.create(\"version\", self.MODULE_VERSION)",
"def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )",
"def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass",
"async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')",
"def __enable_data_access_logging():\n _tempFile = \"tmp_audit_config.json\"\n\n auditConfig = {\n \"auditConfigs\": [\n {\n \"auditLogConfigs\": [\n {\n \"logType\": \"ADMIN_READ\"\n },\n {\n \"logType\": \"DATA_WRITE\"\n },\n {\n \"logType\": \"DATA_READ\"\n }\n ],\n \"service\": \"allServices\",\n }\n ]\n }\n\n # get current policy\n run_command('gcloud projects get-iam-policy {} --format=json >>{}'.format(PROJECT_ID, _tempFile))\n\n # merge it with above-defined config\n merge_JSON(auditConfig, _tempFile)\n\n # set the policy\n run_command('gcloud projects set-iam-policy {} {}'.format(PROJECT_ID, _tempFile))\n\n # delete the temp file\n run_command('rm {}'.format(_tempFile))",
"def _init_and_add_listeners_to_stage_traits(self):\n self.stages[\"Preprocessing\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Preprocessing\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n self.stages[\"Registration\"].config.tracking_tool = self.stages[\"Diffusion\"].config.tracking_processing_tool\n self.stages[\"Registration\"].config.act_tracking = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.use_act\n self.stages[\"Registration\"].config.gmwmi_seeding = self.stages[\"Diffusion\"].config.mrtrix_tracking_config.seed_from_gmwmi\n\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_layout, \"circular_layout\"\n )\n self.stages[\"Connectome\"].config.on_trait_change(\n self.update_vizualization_logscale, \"log_visualization\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_outputs_recon, \"recon_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.on_trait_change(\n self.update_tracking_tool, \"tracking_processing_tool\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.dipy_tracking_config.on_trait_change(\n self.update_preprocessing_act, \"use_act\"\n )\n self.stages[\"Diffusion\"].config.mrtrix_tracking_config.on_trait_change(\n self.update_preprocessing_gmwmi, \"seed_from_gmwmi\"\n )",
"def _audit_cli_args(self):\n\n args = [\n \"--operation=audit\",\n \"--operation=status\",\n \"--logtostderr\",\n ]\n\n return args",
"def generate_config(context):\n\n project_id = context.env['project']\n owners_group = context.properties['owners_group']\n auditors_group = context.properties['auditors_group']\n resources = []\n\n # The GCS bucket to hold logs.\n logs_bucket = context.properties.get('logs_gcs_bucket')\n if logs_bucket:\n resources.append({\n 'name': logs_bucket['name'],\n 'type': 'storage.v1.bucket',\n 'properties': {\n 'location': logs_bucket['location'],\n 'storageClass': logs_bucket['storage_class'],\n 'lifecycle': {\n 'rule': [{\n 'action': {\n 'type': 'Delete'\n },\n 'condition': {\n 'age': logs_bucket['ttl_days'],\n 'isLive': True,\n },\n }],\n },\n },\n 'accessControl': {\n 'gcpIamPolicy': {\n 'bindings': [\n {\n 'role': 'roles/storage.admin',\n 'members': ['group:' + owners_group,],\n },\n {\n 'role': 'roles/storage.objectCreator',\n 'members': ['group:[email protected]'],\n },\n {\n 'role': 'roles/storage.objectViewer',\n 'members': ['group:' + auditors_group,],\n },\n ],\n },\n },\n })\n\n # BigQuery dataset to hold audit logs.\n logs_dataset = context.properties.get('logs_bigquery_dataset')\n if logs_dataset:\n dataset_id = logs_dataset['name']\n resources.append({\n 'name': dataset_id,\n 'type': 'bigquery.v2.dataset',\n 'properties': {\n 'datasetReference': {\n 'datasetId': dataset_id,\n },\n 'location': logs_dataset['location'],\n },\n })\n\n # Update permissions for the dataset. This also removes the deployment\n # manager service account's access.\n resources.append({\n 'name': 'update-' + dataset_id,\n 'action': 'gcp-types/bigquery-v2:bigquery.datasets.patch',\n 'properties': {\n 'projectId':\n project_id,\n 'datasetId':\n dataset_id,\n 'access': [\n {\n 'role': 'OWNER',\n 'groupByEmail': owners_group,\n },\n {\n 'role': 'READER',\n 'groupByEmail': auditors_group,\n },\n {\n 'role': 'WRITER',\n 'userByEmail': logs_dataset['log_sink_service_account'],\n },\n ],\n },\n 'metadata': {\n 'dependsOn': [dataset_id],\n },\n })\n\n return {'resources': resources}",
"def post(self, audit_p):\n context = pecan.request.context\n policy.enforce(context, 'audit:create',\n action='audit:create')\n audit = audit_p.as_audit(context)\n\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n if not audit._goal_uuid:\n raise exception.Invalid(\n message=_('A valid goal_id or audit_template_id '\n 'must be provided'))\n\n strategy_uuid = audit.strategy_uuid\n no_schema = True\n if strategy_uuid is not None:\n # validate parameter when predefined strategy in audit template\n strategy = objects.Strategy.get(pecan.request.context,\n strategy_uuid)\n schema = strategy.parameters_spec\n if schema:\n # validate input parameter with default value feedback\n no_schema = False\n utils.StrictDefaultValidatingDraft4Validator(schema).validate(\n audit.parameters)\n\n if no_schema and audit.parameters:\n raise exception.Invalid(_('Specify parameters but no predefined '\n 'strategy for audit, or no '\n 'parameter spec in predefined strategy'))\n\n audit_dict = audit.as_dict()\n # convert local time to UTC time\n start_time_value = audit_dict.get('start_time')\n end_time_value = audit_dict.get('end_time')\n if start_time_value:\n audit_dict['start_time'] = start_time_value.replace(\n tzinfo=tz.tzlocal()).astimezone(\n tz.tzutc()).replace(tzinfo=None)\n if end_time_value:\n audit_dict['end_time'] = end_time_value.replace(\n tzinfo=tz.tzlocal()).astimezone(\n tz.tzutc()).replace(tzinfo=None)\n\n new_audit = objects.Audit(context, **audit_dict)\n new_audit.create()\n\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('audits', new_audit.uuid)\n\n # trigger decision-engine to run the audit\n if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value:\n self.dc_client.trigger_audit(context, new_audit.uuid)\n\n return Audit.convert_with_links(new_audit)",
"def vault(self, vault):\n\n self._vault = vault",
"def initialize(self, account_id, secrets = 1, threshold = 1):\n\n client = self.connect()\n if client.sys.is_initialized():\n print(\"Vault is already initialized\")\n if client.sys.is_sealed():\n print(\"Unsealing Vault\")\n self.unseal()\n else:\n print(\"Vault already unsealed\")\n else:\n print(\"Initializing with {} secrets and {} needed to unseal\".format(secrets, threshold))\n result = client.sys.initialize(secret_shares=1,\n secret_threshold=1,\n stored_shares=1,\n recovery_shares=secrets,\n recovery_threshold=threshold)\n\n token_file = self.path(VAULT_TOKEN)\n key_file = self.path(VAULT_KEY)\n with open(token_file, \"w\") as fh:\n fh.write(result[\"root_token\"])\n for i in range(secrets):\n with open(key_file + str(i+1), \"w\") as fh:\n fh.write(result[\"recovery_keys\"][i])\n\n # DP TODO: refactor code so that the root token is revoked after configuration?\n # DP ???: If no root token, how to auth for populating future values?\n print()\n print(\"========= WARNING WARNING WARNING =========\")\n print(\"= Vault root token and recovery keys were =\")\n print(\"= written to disk. PROTECT these files. =\")\n print(\"========= WARNING WARNING WARNING =========\")\n print()\n\n # DP NOTE: When using the DynamoDB backend it is common for right after\n # initializing for requests to Vault to response with the error\n # > local node not active but active cluster node not found <\n # If given a little bit of time Vault will respond successfully\n # to requests\n\n def poll():\n \"\"\"Check to see if Vault responds to a request without an error\"\"\"\n try:\n self.connect(VAULT_TOKEN).sys.list_enabled_audit_devices()\n return True\n except hvac.exceptions.InternalServerError as ex:\n if str(ex).startswith('local node not active but active cluster node not found'):\n return False\n raise\n\n print(\"Waiting for Vault to finish initialization \", end='', flush=True)\n step, remaining = 10, 60\n while remaining >= 0:\n if poll():\n break\n\n print(\".\", end='', flush=True)\n remaining -= step\n time.sleep(step)\n if remaining < 0:\n raise Exception(\"Vault not finished initializing\")\n print(\" done\")\n\n self.configure(account_id)",
"def test_backup_restore_with_audit(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)\n status = audit_obj.getAuditStatus()\n self.log.info(\"Audit status on {0} is {1}\".format(self.backupset.cluster_host.ip, status))\n if not status:\n self.log.info(\"Enabling audit on {0}\".format(self.backupset.cluster_host.ip))\n audit_obj.setAuditEnable('true')\n self.backup_create()\n self.backup_cluster()\n field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))\n self.assertTrue(field_verified, \"One of the fields is not matching\")\n self.assertTrue(value_verified, \"Values for one of the fields is not matching\")\n audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)\n status = audit_obj.getAuditStatus()\n self.log.info(\"Audit status on {0} is {1}\".format(self.backupset.restore_cluster_host.ip, status))\n if not status:\n self.log.info(\"Enabling audit on {0}\".format(self.backupset.restore_cluster_host.ip))\n audit_obj.setAuditEnable('true')\n self.backup_restore()\n audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)\n field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))\n self.assertTrue(field_verified, \"One of the fields is not matching\")\n self.assertTrue(value_verified, \"Values for one of the fields is not matching\")",
"def sql_server_audit_config(self) -> 'outputs.SqlServerAuditConfigResponse':\n return pulumi.get(self, \"sql_server_audit_config\")",
"def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):",
"def status(self):\n\n client = self.connect()\n if not client.sys.is_initialized():\n print(\"Vault is not initialized\")\n return\n else:\n print(\"Vault is initialized\")\n\n if client.sys.is_sealed():\n print(\"Vault is sealed\")\n print(client.seal_status)\n return\n else:\n print(\"Vault is unsealed\")\n\n # read in the Vault access token\n client = self.connect(VAULT_TOKEN)\n print()\n print(\"Key Status\")\n print(json.dumps(client.key_status))\n\n print()\n print(\"HA Status\")\n print(json.dumps(client.ha_status))\n\n print()\n print(\"Secret Backends\")\n print(json.dumps(client.sys.list_mounted_secrets_engines(), indent=True))\n\n print()\n print(\"Policies\")\n print(json.dumps(client.sys.list_policies()))\n\n print()\n print(\"Audit Backends\")\n print(json.dumps(client.sys.list_enabled_audit_devices(), indent=True))\n\n print()\n print(\"Auth Backends\")\n print(json.dumps(client.sys.list_auth_methods(), indent=True))",
"def audit(self):\n self.ping()",
"def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)",
"def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls",
"def get_audit(self, query, session):\n raise NotImplementedError()",
"def config(ctx):\n return",
"def get_test_audit(context, **kw):\n obj_cls = objects.Audit\n db_data = db_utils.get_test_audit(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)",
"def __init__(self) -> None:\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)",
"def config():\n\n # Remove all log files from the assets folder.\n for log_file in get_log_files(TESTS_ASSETS_VISION_DIR):\n os.remove(log_file)\n\n # Reconfigure the logger to use a separate folder (instead of the real logs)\n Log.reconfigure(log_directory=TESTS_ASSETS_VISION_DIR)",
"def configure_step(self):\n\n pass",
"def audit_log_configs(self) -> Sequence['outputs.AuditLogConfigResponse']:\n return pulumi.get(self, \"audit_log_configs\")"
]
| [
"0.5853268",
"0.58281404",
"0.58206046",
"0.5669815",
"0.5580182",
"0.5573544",
"0.55566853",
"0.5363275",
"0.53354853",
"0.52385104",
"0.52081513",
"0.5198259",
"0.5194844",
"0.5165448",
"0.5140817",
"0.51114434",
"0.50980794",
"0.5084414",
"0.50298756",
"0.49605715",
"0.4932714",
"0.4932545",
"0.48934424",
"0.48918435",
"0.4866144",
"0.48179117",
"0.4816808",
"0.4804417",
"0.4794189",
"0.47927418"
]
| 0.7084648 | 0 |
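The Vault negatives above all share one hvac rhythm: connect, check initialization and seal state, then query a subsystem. A minimal sketch of that rhythm, assuming a local dev server; the URL and token below are placeholders, not values from the source:

```python
import hvac

# Placeholder address and token; substitute real credentials.
client = hvac.Client(url="http://127.0.0.1:8200", token="s.placeholder")

if client.sys.is_initialized() and not client.sys.is_sealed():
    # The same call the status()/poll() snippets above rely on.
    print(client.sys.list_enabled_audit_devices())
```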
n = number of sides, s = side length, both positive integers. This function returns the sum of the area and the square of the perimeter of the regular polygon described by n and s. | def polysum(n, s):
area = 0
# avoiding division by zero
if n != 0:
area = (0.25 * n * (s**2)) / math.tan(math.pi / n)
perimeter = n * s
return (round(area + perimeter**2, 4)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def area_polygon(n, s):\n area = ((float(1)/float(4)) * n * s ** 2) / (math.tan(math.pi / n))\n return area",
"def polysum(n, s):\n\n import math\n\n area = 0.25*n*s**2/math.tan(math.pi/n)\n peri = s*n\n summary = area + peri**2\n return round(summary, 4)",
"def square_area(side):\n return side**2",
"def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2",
"def area_triangle_sss(side1,side2,side3):\n semi_perim=(side1+side2+side3)/2.0\n return math.sqrt(semi_perim*\n (semi_perim - side1)*\n (semi_perim - side2)*\n (semi_perim - side3)\n )",
"def area_triangle_sss(side1, side2, side3):\n \n # Use Heron's formula\n semiperim = (side1 + side2 + side3) / 2.0\n return math.sqrt(semiperim *\n (semiperim - side1) *\n (semiperim - side2) * \n (semiperim - side3))",
"def area_reg_polygon(sides: int, length: float) -> float:\r\n if not isinstance(sides, int) or sides < 3:\r\n raise ValueError(\r\n \"area_reg_polygon() only accepts integers greater than or \\\r\nequal to three as number of sides\"\r\n )\r\n elif length < 0:\r\n raise ValueError(\r\n \"area_reg_polygon() only accepts non-negative values as \\\r\nlength of a side\"\r\n )\r\n return (sides * length**2) / (4 * tan(pi / sides))\r\n return (sides * length**2) / (4 * tan(pi / sides))",
"def squareArea(sidelength):\n sidelength = float(sidelength)\n return sidelength**2",
"def perimeter(self):\n return sum([s.length for s in self.segments])",
"def area_square(side_length: float) -> float:\r\n if side_length < 0:\r\n raise ValueError(\"area_square() only accepts non-negative values\")\r\n return side_length**2",
"def area_equilat(side):\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)",
"def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")",
"def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])",
"def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6",
"def area_polygon(polygon):\n o = centroid_points(polygon)\n u = subtract_vectors(polygon[-1], o)\n v = subtract_vectors(polygon[0], o)\n a = 0.5 * length_vector(cross_vectors(u, v))\n for i in range(0, len(polygon) - 1):\n u = v\n v = subtract_vectors(polygon[i + 1], o)\n a += 0.5 * length_vector(cross_vectors(u, v))\n return a",
"def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2",
"def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))",
"def polygon_area(side_length, **kwargs):\n if not isinstance(side_length, int):\n raise ValueError('Positional argument `side_length` must be an integer')\n\n if not kwargs:\n raise ValueError('Missing keyword arguments!')\n else:\n if 'sides' not in kwargs:\n raise ValueError('Missing keyword argument `sides`')\n\n sides = kwargs['sides']\n\n if not isinstance(sides, int):\n raise ValueError('Keyword argument `sides` must be an integer')\n\n if sides < 3 or sides > 6:\n raise ValueError('Number of polygon sides must be within 3-6, but found {}'.format(sides))\n\n if sides == 3:\n return side_length * math.sqrt(3) / 4\n elif sides == 4:\n return side_length ** 2\n if sides == 5:\n return 5 * side_length ** 2 / (4 * math.tan(36))\n if sides == 6:\n return (3 * math.sqrt(3)) * side_length ** 2 / 2",
"def regular_polygon_area_equivalent_radius(n, radius=1.0):\n\n theta = 2 * np.pi / n\n\n r = np.sqrt((theta * radius ** 2) / np.sin(theta))\n return r",
"def perimeter(points):\n return sum(get_distances(points))",
"def rectangle_area(side1, side2):\n return float(side1) * float(side2)",
"def area_triangle(w, h):\n return w * h / 2",
"def AreaOfPolygon(points):\n # Note: area will be negative for clockwise shapes.\n # See http://paulbourke.net/geometry/polyarea/\n A = 0\n N = len(points)\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n A += (x_i * y_ip1 - x_ip1 * y_i)\n return A / 2",
"def polygon_area(pr, pc):\n pr = np.asarray(pr)\n pc = np.asarray(pc)\n return 0.5 * np.abs(np.sum((pc[:-1] * pr[1:]) - (pc[1:] * pr[:-1])))",
"def area(length, hypotenuse):\n side = int(length)* hypotenuse\n return round(side*2, 2) # returns the rounded area of the roof.",
"def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total -= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total",
"def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area",
"def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total",
"def area(triangles=None, crosses=None, sum=False):\n if crosses is None:\n crosses = cross(triangles)\n area = (np.sum(crosses**2, axis=1)**.5) * .5\n if sum:\n return np.sum(area)\n return area",
"def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)"
]
| [
"0.8268719",
"0.74184024",
"0.68404865",
"0.680953",
"0.67098576",
"0.6519029",
"0.6479288",
"0.64529556",
"0.63541496",
"0.63223577",
"0.6297985",
"0.6282858",
"0.62641627",
"0.6218108",
"0.6207018",
"0.6203102",
"0.6181889",
"0.61453784",
"0.6070842",
"0.60687673",
"0.60675716",
"0.60566014",
"0.60410655",
"0.6024938",
"0.6007097",
"0.6004254",
"0.5969303",
"0.59548926",
"0.5942619",
"0.59411925"
]
| 0.83128154 | 0 |
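A quick sanity check of polysum against two hand-computed cases; the function is re-pasted so the snippet runs stand-alone, and the expected constants follow from area = 0.25*n*s^2/tan(pi/n) plus (n*s)^2:

```python
import math

def polysum(n, s):
    area = 0
    if n != 0:  # avoid tan(pi/0)
        area = (0.25 * n * (s ** 2)) / math.tan(math.pi / n)
    perimeter = n * s
    return round(area + perimeter ** 2, 4)

assert polysum(4, 2) == 68.0   # square: area 4.0 plus perimeter 8 squared
assert polysum(3, 1) == 9.433  # unit triangle: area ~0.4330 plus 3 squared
```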
Print the newline-separated lines of the string s, each indented by lvl spaces. | def printIndent(s,lvl) :
for line in s.split('\n') :
print('%s%s' % (' '*lvl,line)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indent_lines(s, n):\n return \"\\n\".join(map(lambda line: \" \" * n + line, \n s.split('\\n')))",
"def FormatLines(s, depth, reflow=True):\n if reflow:\n lines = _ReflowLines(s, depth)\n else:\n lines = [s]\n\n result = []\n for line in lines:\n line = (\" \" * TABSIZE * depth) + line + \"\\n\"\n result.append(line)\n return result",
"def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())",
"def _print_with_depth(self, string, depth):\n print(\"{0}{1}\".format(\" \" * depth, string))",
"def print_nodes(self, s):\n\t\treturn ' '.join((s.replace(\"\\n\",\" \")).split())",
"def indent(string, level=1):\n spaces = ' ' * (level * 4)\n return \"%s%s\" % (spaces, string)",
"def _display(s):\n if not isinstance(s, unicode):\n s = s.decode(\"utf-8\")\n s = _indent(_escaped_text_from_text(s, \"whitespace\"), 4)\n if not s.endswith('\\n'):\n s += '\\n'\n return s",
"def _indent_text(self, lines, level=1):\n prefix = ' ' * (4 * level)\n if isinstance(lines, basestring):\n return prefix + lines\n else:\n return '\\n'.join(\n prefix + line\n for line in lines\n )",
"def _ReflowLines(s, depth):\n size = MAX_COL - depth * TABSIZE\n if len(s) < size:\n return [s]\n\n lines = []\n cur = s\n padding = \"\"\n while len(cur) > size:\n i = cur.rfind(' ', 0, size)\n # XXX this should be fixed for real\n if i == -1 and 'GeneratorExp' in cur:\n i = size + 3\n assert i != -1, \"Impossible line %d to reflow: %r\" % (size, s)\n lines.append(padding + cur[:i])\n if len(lines) == 1:\n # find new size based on brace\n j = cur.find('{', 0, i)\n if j >= 0:\n j += 2 # account for the brace and the space after it\n size -= j\n padding = \" \" * j\n else:\n j = cur.find('(', 0, i)\n if j >= 0:\n j += 1 # account for the paren (no space after it)\n size -= j\n padding = \" \" * j\n cur = cur[i + 1:]\n else:\n lines.append(padding + cur)\n return lines",
"def newline(lines=1):\n\n # Print the new line iterated by the amount of new lines\n print('\\n' * lines)",
"def nl():\n\tprint(\"\")",
"def printstringtp2(xs): #Printing function\n for x in range(xs+1): #Outer loop for line iteration\n print(\"\\n\")\n for y in range(x):\n print(y,end=' ')",
"def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))",
"def node_s(self, lvl=0):\n s = \"\"\n for n in self.kids:\n s += \" \" * (lvl + 1) + n.node_s(lvl + 1) + \"\\n\\n\"\n return s",
"def getHorizontalTreeString(s):\n\t#we know first character is '[' so we can skip it:\n\toutputBuffer = [s[1]]\n\tnestingLevel = 0\n\tfor i in range(2,len(s)):\n\t\tcurrentChar = s[i]\n\t\tif currentChar == '[':\n\t\t\tnestingLevel += 1\n\t\t\toutputBuffer.append(' ')\n\t\telif currentChar == ']':\n\t\t\tnestingLevel -= 1\n\t\telif currentChar == ',':\n\t\t\toutputBuffer.append('\\n')\n\t\t\tfor _ in range(nestingLevel):\n\t\t\t\toutputBuffer.append(' ')\n\t\telif currentChar != ' ':\n\t\t\toutputBuffer.append(currentChar)\n\treturn \"\".join(outputBuffer)",
"def print_sub_section(self, s, level=0):\n section = s.capitalize()\n\n self.print_newline()\n self._write('%s+ %s\\n' % ('-' * level, section))\n self.print_newline()",
"def print_line(s, bold=False, underline=False, blinking=False, color=None, bgcolor=None, end='\\n'):\n s = get_line(s, bold=bold, underline=underline,\n blinking=blinking, color=color, bgcolor=bgcolor)\n print(s, end=end)",
"def recurse(n, s):\n print(f\"recurse n -> {n}\")\n print(f\"recurse s -> {s}\")\n if n == 0:\n print(s)\n else:\n recurse(n-1, n+s)",
"def indent(text, first_line=True, n=1, width=4):\n lines = text.split(\"\\n\")\n if not first_line:\n first = lines[0]\n lines = lines[1:]\n\n spaces = \" \" * (width * n)\n lines2 = [spaces + x for x in lines]\n\n if not first_line:\n lines2.insert(0, first)\n\n indented = \"\\n\".join(lines2)\n\n return indented",
"def _indent(s, width=4, skip_first_line=False):\n lines = s.splitlines(1)\n indentstr = ' '*width\n if skip_first_line:\n return indentstr.join(lines)\n else:\n return indentstr + indentstr.join(lines)",
"def BoothRecToString(s, indent=0):\n\n sp = \" \" * indent\n h = []\n n = []\n for i in list(s):\n if i == \"0\":\n h.append(\" \")\n n.append(\"0\")\n elif i == \"1\":\n h.append(\" \")\n n.append(\"1\")\n elif i == \"2\":\n h.append(\"^\")\n n.append(\"1\")\n return sp + \"\".join(h) + \"\\n\" + sp + \"\".join(n)",
"def printAsHorizontalTree(s):\n\tERROR_MSG = \"Invalid!\"\n\tif syntaxValid(s):\n\t\tprint(getHorizontalTreeString(s))\n\telse:\n\t\tprint(ERROR_MSG)",
"def indent_multiline_string(in_string, spaces):\n if in_string.find(\"\\n\") != -1:\n return \"\\n\".join([(' ' * spaces) + line for line in in_string.split(\"\\n\")])\n else:\n return in_string",
"def print_string(self, s):\n self._write('%s\\n' % s)",
"def pretty_lines(self, out_lines=None, indent_level=0):\n if out_lines is None:\n out_lines = []\n\n out_lines.append(\"\".join((\" \" * indent_level, repr(self), \":\")))\n for sub_thing in self.contents:\n if isinstance(sub_thing, Node):\n sub_thing.pretty_lines(out_lines, indent_level + 1)\n else:\n out_lines.append(\"\".join(\n (\" \" * (indent_level + 1), repr(sub_thing))))\n return out_lines",
"def verbose(string, level, indent=None):\n if args.verbose:\n if args.verbose > level:\n if indent is None:\n if level <= LEVEL_4:\n indent = \" \" * level\n else:\n indent = \" \"\n print (indent + string)\n return",
"def _newline(self):\n if prettyprint:\n return '\\n' + self._indent_spaces()\n else:\n return ''",
"def winNewLines(inString):\n return reUnixNewLine.sub('\\r\\n',inString)",
"def debug(self, s, level=1):\n if self._debug >= level:\n print(s)",
"def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)"
]
| [
"0.709268",
"0.66912967",
"0.6484763",
"0.62385815",
"0.6086636",
"0.60229385",
"0.6016925",
"0.5888356",
"0.58879775",
"0.55820423",
"0.5500408",
"0.54932576",
"0.5424956",
"0.5420362",
"0.53914034",
"0.5383994",
"0.5332292",
"0.53215086",
"0.52720195",
"0.52537227",
"0.524953",
"0.5249379",
"0.5240964",
"0.5237342",
"0.52344054",
"0.52206546",
"0.52201355",
"0.52170056",
"0.52020407",
"0.51848936"
]
| 0.7449001 | 0 |
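Usage of printIndent is direct; note that, as written, lvl counts spaces rather than multi-space indent levels. A stand-alone sketch with the function re-pasted:

```python
def printIndent(s, lvl):
    for line in s.split('\n'):
        print('%s%s' % (' ' * lvl, line))

printIndent("first\nsecond", 4)
# prints:
#     first
#     second
```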
Return the score of the game if L is the next to move. L looks for high scores. The score is the first item in the tuple that is returned. The remaining items are the sequence of games that led to this score. | def scoreL(self) :
if self.leafL() :
#
# Here L has no possible moves. Return
# the leaf score at the current leaf.
#
return self.leafScore(), self
else :
games = self.L()
max_g = games[0]
max_score = max_g.scoreR()
for g in games[1:] :
score = g.scoreR()
if score[0] > max_score[0] :
max_g = g
max_score = score
return (max_score+(max_g,)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scoreR(self) :\n if self.leafR() :\n return self.leafScore(), self\n else :\n games = self.R()\n min_g = games[0]\n min_score = min_g.scoreL()\n for g in games[1:] :\n score = g.scoreL()\n if score[0] < min_score[0] :\n min_g = g\n min_score = score\n return (min_score+(min_g,))",
"def mm_move(board, player): \r\n if board.check_win() != None:\r\n score = SCORES[board.check_win()]\r\n return score, (-1,-1)\r\n else:\r\n best_score = -2\r\n score_list = []\r\n move_list = []\r\n for each_cell in board.get_empty_squares():\r\n passboard = board.clone()\r\n passboard.move(each_cell[0], each_cell[1], player) \r\n other_player = provided.switch_player(player)\r\n nextmove = mm_move(passboard, other_player)\r\n score_list.append(nextmove[0])\r\n move_list.append(nextmove[1])\r\n if nextmove[0] == SCORES[player]:\r\n return nextmove[0], each_cell\r\n #print score_list\r\n #print move_list\r\n #print \"\"\r\n if player == provided.PLAYERX:\r\n best_score = max(score_list)\r\n else:\r\n best_score = min (score_list)\r\n best_move = move_list[score_list.index(best_score)]\r\n return best_score, best_move",
"def best_move(self, scores):\n \n max_val = max(scores)\n \n tie_list = []\n for i in range(len(scores)):\n if scores[i] == max_val:\n tie_list.append(i)\n if self.is_random:\n return random.choice(tie_list)\n else:\n return tie_list[0]",
"def get_move(self, game, time_left):\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n _, move = max([(self.score(game.forecast_move(m), self), m) for m in legal_moves])\n return move",
"def mm_move(board, player):\r\n if board.check_win() == provided.PLAYERX:\r\n return SCORES[provided.PLAYERX],(-1,-1)\r\n elif board.check_win() == provided.PLAYERO:\r\n return SCORES[provided.PLAYERO],(-1,-1)\r\n elif board.check_win() == provided.DRAW:\r\n return SCORES[provided.DRAW],(-1,-1)\r\n else:\r\n empty_tuple_list = board.get_empty_squares()\r\n score_pos_tuple_list = []\r\n best_score = None\r\n best_pos = None\r\n for idx1 in range(len(empty_tuple_list)):\r\n empty_tuple = empty_tuple_list[idx1]\r\n board_clone = board.clone()\r\n board_clone.move(empty_tuple[0],empty_tuple[1],player)\r\n score_pos_tuple = mm_move(board_clone,provided.switch_player(player))\r\n score_pos_tuple_list.append(score_pos_tuple)\r\n\r\n #decide best score and pos fast!!!\r\n if score_pos_tuple[0]*SCORES[player] == 1:\r\n return (score_pos_tuple[0],empty_tuple)\r\n\r\n #decide best score and pos\r\n for idx2 in range(len(score_pos_tuple_list)):\r\n if idx2 == 0:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n else:\r\n if score_pos_tuple_list[idx2][0]*SCORES[player] > best_score*SCORES[player]:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n\r\n return (best_score,best_pos)",
"def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Longest Path Heuristic (used towards end game)\n\n game_phase = len(game.get_blank_spaces()) # high if early, low if late in game\n max_phase = game.width*game.height\n\n def longestPath(player,game,path=0,longest=0):\n moves = game.get_legal_moves(player)\n if path > longest:\n longest = path\n if len(moves) == 0:\n path = 0\n for move in moves:\n new_board = game.forecast_move(move)\n longestPath(player,new_board,path+1,longest)\n return longest\n\n if (game_phase<15): # only feasible to calculate late-game\n game_phase = abs(game_phase-max_phase) # low if early, high if late in game\n return (longestPath(player,game)-longestPath(game.get_opponent(player),game))\n else:\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))",
"def next_move(self, board):\r\n scores = self.scores_for(board)\r\n self.num_moves += 1\r\n return self.max_score_column(scores)",
"def best_score(scores):\n idx, score = sorted(\n enumerate(scores), key=lambda e: e[1], reverse=scores[0].higher_better\n )[0]\n return (idx + 1, score)",
"def get_move(self, game, time_left):\n self.time_left = time_left\n self.best_move = (-1, -1)\n\n try:\n i = 1\n while True:\n self.best_move = self.alphabeta(game, i)\n i = i + 1\n return self.best_move\n except SearchTimeout:\n return self.best_move\n\n return self.best_move",
"def __get_next_greedy_move(self, game_state): \n best_move = None\n best_score = None\n for free_seat in self.__get_free_seats(game_state):\n next_game_state_score = self.__get_score(game_state, free_seat)\n if best_score is None:\n best_score = next_game_state_score\n best_move = free_seat\n continue\n if next_game_state_score > best_score:\n best_score = next_game_state_score\n best_move = free_seat\n return best_move",
"def get_best_move(board, scores):\n empty = board.get_empty_squares()\n if len(empty) == 0:\n return\n best_move = None\n best_score = None\n for square in empty:\n if best_move == None or scores[square[0]][square[1]] > best_score:\n best_move = square\n best_score = scores[square[0]][square[1]]\n return best_move",
"def find_best_move(board, score):\n\n global UP, DOWN, LEFT, RIGHT\n global move_args\n\n max_depth = 1\n if score <= 10000:\n max_depth += 0\n elif score <= 20000:\n max_depth += 1\n else:\n max_depth += 2\n\n pool = mp.Pool(mp.cpu_count())\n\n result = [pool.apply(score_toplevel_move, args=(i, board, max_depth)) for i in range(len(move_args))]\n best_move = result.index(max(result))\n\n pool.close()\n\n for m in move_args:\n print(\"move: %d score: %.4f\" % (m, result[m]))\n\n return best_move",
"def get_highscore(self, score):\n scores = list(self.history_score.values())\n \n # Compare current score with the last placing in leaderboard.\n if score > max(scores):\n return 0\n else:\n if score < min(scores):\n return 2\n else:\n return 1",
"def get_move(self, game, time_left):\n\n self.time_left = time_left\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_score = float(\"-inf\")\n best_move = legal_moves[0]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n self.search_depth = 1\n while best_score is not float(\"inf\"):\n best_move = self.alphabeta(game, self.search_depth, alpha=float(\"-inf\"), beta=float(\"inf\"))\n self.search_depth += 1\n except SearchTimeout:\n return best_move\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move",
"def frame_score(self):\n\n # look ahead\n length = {\n \"STRIKE\": 3,\n \"SPARE\": 3,\n \"OPEN\": 2,\n \"UNFINISHED\": 0\n }[self._type]\n\n key = self._parent._rolls.index(self._score[0])\n rolls = self._parent._rolls[key:key+length]\n return sum(roll.pins for roll in rolls)",
"def open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return float(len(game.get_legal_moves(player)))",
"def next_move(self, board):\n \n return self.best_move(self.score_columns(board))",
"def get_best_move(board, scores):\n empty_squares = board.get_empty_squares()\n max_score = -float(\"inf\")\n max_list = []\n \n if len(board.get_empty_squares()) == 0:\n print \"No Empty Tiles Left!\"\n else:\n for row in range(board.get_dim()):\n for col in range(board.get_dim()):\n if scores[row][col] > max_score and (row, col) in empty_squares:\n max_score = scores[row][col]\n max_list = [row, col]\n max_tuple = (max_list[0], max_list[1])\n return max_tuple",
"def get_best_move(self):\n moves1 = self.get_next_moves1() # moves1 represents all legal moves.\n moves2 = self.get_next_moves2() # moves2 represents the moves that allow the AI to score a box.\n moves3 = self.get_next_moves3() # moves3 represents the moves that will allow the player to score a box.\n\n\n if len(moves1) == 0: # the siuation that there is no legal move\n return self\n if len(moves2) != 0:\n return moves2[len(moves2) // 2] # the siuation that there is(are) move(s) to score\n\n elif len(moves3) != 0:\n return moves3[len(moves3) // 2] # the siuation that there is(are) moves(s) to allow the player to score\n\n else:\n return moves1[len(moves1) // 2] # if there is no better moves, the AI will play sequentially, starting from the top left.",
"def get_best_move(board, scores):\n if board.check_win()==None:\n maximum=-999\n ret_location=(0,0)\n empty=board.get_empty_squares()\n #print empty\n for position in empty:\n if scores[position[0]][position[1]]>maximum:\n maximum=scores[position[0]][position[1]]\n #print max\n ret_location=(position[0],position[1])\n return ret_location\n else:\n return None",
"def score(game):\r\n result = 0\r\n roll = 0\r\n game = game + [0]*(21 - len(game))\r\n\r\n for frame in range(0, 10):\r\n if is_strike(game, roll):\r\n result += _score_strike(game, roll)\r\n roll += 1\r\n elif is_spare(game, roll):\r\n result += _score_spare(game, roll)\r\n roll += 2\r\n else:\r\n result += _score_frame(game, roll)\r\n roll += 2\r\n\r\n return result",
"def get_best_move(board, scores): \n empty_squares = board.get_empty_squares()\n highest_score = None\n best_pos = []\n \n for empty in range(len(empty_squares)):\n pos = empty_squares[empty] \n if highest_score == None:\n highest_score = scores[pos[0]][pos[1]]\n if scores[pos[0]][pos[1]] >= highest_score:\n highest_score = scores[pos[0]][pos[1]]\n \n for empty in range(len(empty_squares)):\n pos = empty_squares[empty]\n if scores[pos[0]][pos[1]] == highest_score:\n best_pos.append(pos) \n return random.choice(best_pos)",
"def next_move(self, board):\r\n x = self.max_score_column(self.scores_for(board))\r\n self.num_moves += 1\r\n return x",
"def get_best_score_and_time(self):\n\n best_time = 10000\n best_score = 0\n\n for game in self.games:\n if game.status == \"won\":\n if best_time > game.timing:\n best_time = game.timing\n if best_score < game.score:\n best_score = game.score\n\n if best_time == 10000:\n best_time = 0\n\n return (best_score, best_time)",
"def get_move(self, game, time_left):\n\n self.time_left = time_left\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n remaining_legal_moves = game.get_legal_moves(game.active_player)\n no_legal_moves = (-1, -1)\n best_move = no_legal_moves\n if not remaining_legal_moves:\n return no_legal_moves\n\n # Use random IDS depth between 9 and 12\n depth = random.randint(9, 12)\n\n try:\n # Perform IDS\n while True:\n depth += 1\n _, best_move = self.alphabeta(game, depth)\n\n if self.time_left() <= 0.001:\n return best_move\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n return best_move\n\n # Return the best move from the last completed search iteration\n return best_move",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def __get_score(self, game_state, move):\n return self.q_values[self.__encode_state(game_state)][move][0]",
"def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)",
"def mm_move(board, player):\n moves = []\n results = []\n best_score = None\n best_move = None\n \n opponet = op_player(player)\n \n if board.check_win() != None:\n \n if board.check_win() == provided.PLAYERX:\n return SCORES[provided.PLAYERX] , (-1, -1)\n \n if board.check_win() == provided.PLAYERO:\n return SCORES[provided.PLAYERO] , (-1, -1)\n \n if board.check_win() == provided.DRAW:\n return SCORES[provided.DRAW] , (-1, -1)\n \n free_steps = board.get_empty_squares()\n \n for step in free_steps:\n clone = board.clone() \n clone.move(step[0],step[1],player)\n temp = mm_move(clone,opponet)\n \n if temp != None:\n if temp[0] == SCORES[player]: \n return temp[0] , step \n else: \n results.append(temp)\n moves.append(step)\n \n for result, move in zip(results, moves): \n if result[0] * SCORES[player] > best_score:\n best_score = result[0]\n best_move = move\n return best_score, best_move",
"def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n\n best_move = (-1, -1)\n\n while self.time_left() > self.TIMER_THRESHOLD:\n self.update(game)\n\n if not self.is_terminal(game):\n moves = game.get_legal_moves()\n scores = [(self._plays[game.forecast_move(m)], m) for m in moves]\n _, best_move = max(scores, key=lambda s: s[0])\n\n return best_move"
]
| [
"0.66091055",
"0.64791185",
"0.6124664",
"0.6111734",
"0.6031296",
"0.5966228",
"0.59599215",
"0.5930257",
"0.5900158",
"0.5900001",
"0.5888329",
"0.586471",
"0.5856103",
"0.5846379",
"0.58395654",
"0.5838082",
"0.5822587",
"0.58184224",
"0.5794663",
"0.57706267",
"0.57643527",
"0.5763701",
"0.5744992",
"0.5744353",
"0.5736233",
"0.5733473",
"0.5732313",
"0.57270765",
"0.5720766",
"0.57090706"
]
| 0.7738779 | 0 |
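scoreL is the maximizing half of a plain minimax: it assumes the game object exposes leafL(), leafScore(), and L() (the child positions after L's moves) and defers to scoreR for the opponent's reply. The same max-over-min recursion on a toy dict tree; the names score_l, score_r, and the tree layout are illustrative, not from the source:

```python
# L maximizes over R's replies; R minimizes over L's replies.
def score_l(node):
    children = node.get("L", [])            # positions L can move to
    if not children:                        # leaf for L
        return node["score"], [node["name"]]
    best = max((score_r(c) for c in children), key=lambda t: t[0])
    return best[0], [node["name"]] + best[1]

def score_r(node):
    children = node.get("R", [])
    if not children:
        return node["score"], [node["name"]]
    best = min((score_l(c) for c in children), key=lambda t: t[0])
    return best[0], [node["name"]] + best[1]

tree = {"name": "root", "L": [
    {"name": "a", "R": [{"name": "a1", "score": 3}, {"name": "a2", "score": 5}]},
    {"name": "b", "R": [{"name": "b1", "score": 4}, {"name": "b2", "score": 9}]},
]}
print(score_l(tree))  # (4, ['root', 'b', 'b1']): L picks b, R answers with the lower leaf
```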
Return the score of the game if R is the next to move. R looks for low scores. | def scoreR(self) :
if self.leafR() :
#
# Here R has no possible moves. Return
# the leaf score at the current leaf.
#
return self.leafScore(), self
else :
games = self.R()
min_g = games[0]
min_score = min_g.scoreL()
for g in games[1:] :
score = g.scoreL()
if score[0] < min_score[0] :
min_g = g
min_score = score
return (min_score+(min_g,)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return float(len(game.get_legal_moves(player)))",
"def get_score(self) -> int:\n return self.rstate.score()",
"def check_score(self, move):\n\n i = int(move) // 7\n j = int(move) % 7 #find the corresponding index of the input move\n\n if i == 0: #top\n if self.board[i+1][j-1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i+2][j] != \"\":\n return 1\n return 0\n if i == 6: #bottom\n if self.board[i-1][j-1] != \"\" and self.board[i-1][j+1] != \"\" and self.board[i-2][j] != \"\":\n return 1\n return 0\n if j == 0: #left\n if self.board[i-1][j+1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i][j+2] != \"\":\n return 1\n return 0\n if j == 6: #right\n if self.board[i-1][j-1] != \"\" and self.board[i+1][j-1] != \"\" and self.board[i][j-2] != \"\":\n return 1\n return 0\n if i == 2 or i == 4: # horizontal\n score = 0\n if self.board[i-1][j-1] != \"\" and self.board[i-1][j+1] != \"\" and self.board[i-2][j] != \"\":\n score += 1\n if self.board[i+1][j-1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i+2][j] != \"\":\n score += 1\n return score\n\n if j == 2 or j == 4: # vertical\n score = 0\n if self.board[i-1][j-1] != \"\" and self.board[i+1][j-1] != \"\" and self.board[i][j-2] != \"\":\n score += 1\n if self.board[i-1][j+1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i][j+2] != \"\":\n score += 1\n return score",
"def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)",
"def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc",
"def _scoring(self):\n val = 0 #score will be totaled here\n\n for c in range(0, self.width): #for every column in the board\n for r in range(0, self.height): #for every row of a column\n #see if we can move...\n possible_up = (r + 3 < self.height) #up?\n possible_left = (c - 3 > 0) #left?\n possible_right = (c + 3 < self.width) #right?\n\n #diagonally up, left\n if possible_up and possible_left:\n val+= self._up_left(c, r)\n\n #up\n if possible_up:\n val+= self._up(c,r)\n\n #diagonally up, right\n if possible_up and possible_right:\n val+= self._up_right(c,r)\n\n #right\n if possible_right:\n val+= self._right(c,r)\n\n\n return val",
"def next_move(self, board):\r\n scores = self.scores_for(board)\r\n self.num_moves += 1\r\n return self.max_score_column(scores)",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)",
"def evaluate_board(self, board):\n \n win_score = 100\n win_or_loss_score = 50\n lose_score = 0\n \n if board.win_for(self.opponent()):\n return lose_score\n if board.win_for(self.side):\n return win_score\n if not board.win_for(self.side) or not board.win_for(self.opponent()):\n return win_or_loss_score",
"def basic_evaluate(board):\n if board.is_game_over():\n # If the game has been won, we know that it must have been\n # won or ended by the previous move.\n # The previous move was made by our opponent.\n # Therefore, we can't have won, so return -1000.\n # (note that this causes a tie to be treated like a loss)\n score = -1000\n else:\n score = board.longest_chain(board.get_current_player_id()) * 10\n # Prefer having your pieces in the center of the board.\n for row in range(6):\n for col in range(7):\n if board.get_cell(row, col) == board.get_current_player_id():\n score -= abs(3-col)\n elif board.get_cell(row, col) == board.get_other_player_id():\n score += abs(3-col)\n\n return score",
"def get_r_score(self):\n return self.r_score",
"def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1",
"def scoreL(self) :\n if self.leafL() :\n #\n # Here L has no possible moves. Return\n # the leaf score at the current leaf.\n #\n return self.leafScore(), self\n else :\n games = self.L()\n max_g = games[0]\n max_score = max_g.scoreR()\n for g in games[1:] :\n score = g.scoreR()\n if score[0] > max_score[0] :\n max_g = g\n max_score = score\n return (max_score+(max_g,))",
"def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore",
"def mm_move(board, player): \r\n if board.check_win() != None:\r\n score = SCORES[board.check_win()]\r\n return score, (-1,-1)\r\n else:\r\n best_score = -2\r\n score_list = []\r\n move_list = []\r\n for each_cell in board.get_empty_squares():\r\n passboard = board.clone()\r\n passboard.move(each_cell[0], each_cell[1], player) \r\n other_player = provided.switch_player(player)\r\n nextmove = mm_move(passboard, other_player)\r\n score_list.append(nextmove[0])\r\n move_list.append(nextmove[1])\r\n if nextmove[0] == SCORES[player]:\r\n return nextmove[0], each_cell\r\n #print score_list\r\n #print move_list\r\n #print \"\"\r\n if player == provided.PLAYERX:\r\n best_score = max(score_list)\r\n else:\r\n best_score = min (score_list)\r\n best_move = move_list[score_list.index(best_score)]\r\n return best_score, best_move",
"def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Longest Path Heuristic (used towards end game)\n\n game_phase = len(game.get_blank_spaces()) # high if early, low if late in game\n max_phase = game.width*game.height\n\n def longestPath(player,game,path=0,longest=0):\n moves = game.get_legal_moves(player)\n if path > longest:\n longest = path\n if len(moves) == 0:\n path = 0\n for move in moves:\n new_board = game.forecast_move(move)\n longestPath(player,new_board,path+1,longest)\n return longest\n\n if (game_phase<15): # only feasible to calculate late-game\n game_phase = abs(game_phase-max_phase) # low if early, high if late in game\n return (longestPath(player,game)-longestPath(game.get_opponent(player),game))\n else:\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))",
"def next_move(self, board):\r\n x = self.max_score_column(self.scores_for(board))\r\n self.num_moves += 1\r\n return x",
"def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)",
"def r_point(self):\n self.r_score += 1\n self.update_scoreboard()",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def custom_score(game, player):\n \"\"\" custom_score heuristic function idea is to implement aggressive heuristic function \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) # Calculate length of myPlayer moves\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player))) # Calculate length of opposite player moves same as custom score 2\n return float(length_my_player_moves - 1.5*length_opp_payer_moves)",
"def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \"\"\"\n #Heuristic 1: Aggressive Improved Score\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return float(own_moves - 2*opp_moves)\n\n \"\"\"\n\n \"\"\"\n #Heuristic 2: Border/Non-Border Differentiated Moves Scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n own_score = 0\n opp_score = 0\n for each_move in game.get_legal_moves(player):\n if each_move in border_moves:\n own_score = own_score + 1\n else:\n own_score = own_score + 1.5\n\n for each_move in game.get_legal_moves(game.get_opponent(player)):\n if each_move in border_moves:\n opp_score = opp_score + 1\n else:\n opp_score = opp_score + 1.5\n\n return float(own_score - opp_score)\n \"\"\"\n\n #Heuristic 3: Advanced Differentiated Board scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n\n next_to_border_moves = [(1,1), (1,2), (1,3), (1,4), (1,5), (2,1),\n (2,5), (3,1), (3,5), (4,1), (4,5),\n (5,1), (5,2), (5,3), (5,4), (5,5)]\n\n own_score = 0\n opp_score = 0\n\n for move in game.get_legal_moves(player):\n if move in border_moves:\n own_score += 1\n elif move in next_to_border_moves:\n own_score += 1.2\n else:\n own_score += 1.5\n\n for move in game.get_legal_moves(game.get_opponent(player)):\n if move in border_moves:\n opp_score += 1\n elif move in next_to_border_moves:\n opp_score += 1.2\n else:\n opp_score += 1.5\n\n return float(own_score - opp_score)",
"def custom_score_2(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf\n if game.is_winner(player):\n #print(\"You win\")\n return math.inf\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n opp_y_coord, opp_x_coord = game.get_player_location(opponent)\n opp_x_eval = (width - float(opp_x_coord)) ** 2\n opp_y_eval = (height - float(opp_y_coord)) ** 2\n opp_center_eval = float(opp_x_eval + opp_y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves * 2 - opp_center_eval\n score = no_moves - opp_score/rem_spaces\n return float(score)",
"def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)",
"def evaluate(self):\n # if player has no move, then player lost, -inf or inf depend on who the player is\n # if player has moves, use heuristics.\n \n #checkColorMoves = self.getAvailableMoves(self.colorIndex)\n #otherColorMoves = self.getAvailableMoves(1-self.colorIndex)\n \n checkColorMoves = self.getAvailableMovesPreferLonger(self.colorIndex)\n otherColorMoves = self.getAvailableMovesPreferLonger(1-self.colorIndex)\n\n checkColorPieces = self.getPieceCount(self.colorIndex)\n otherColorPieces = self.getPieceCount(1-self.colorIndex)\n\n #checkColorEdgePieces = self.getEgdePieceCount(self.colorIndex)\n #otherColorEdgePieces = self.getEgdePieceCount(1-self.colorIndex)\n\n if self.player == 'computer':\n if checkColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n elif otherColorMoves == 0: #user doesn't have moves\n return float('inf')\n else:\n #return checkColorPieces - otherColorPieces\n return checkColorMoves - otherColorMoves\n else:\n if checkColorMoves == 0: #user doesn't have moves\n return float('inf')\n elif otherColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n else:\n #return otherColorPieces - checkColorPieces\n return otherColorMoves - checkColorMoves",
"def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1",
"def check_score(self):\r\n # If player reaches the exit before time expires\r\n final_score = int((5 - self._maze.items_remaining())*(self._time)*(100))\r\n score = (final_score, 5 - self._maze.items_remaining(), self._time)\r\n\r\n return score"
]
| [
"0.71845496",
"0.70455074",
"0.7044914",
"0.70207614",
"0.7008354",
"0.69949126",
"0.69718325",
"0.6961224",
"0.69367784",
"0.6910868",
"0.6904701",
"0.6903983",
"0.6901333",
"0.68918365",
"0.68903387",
"0.68803644",
"0.6870168",
"0.6866687",
"0.68577814",
"0.6853682",
"0.68445337",
"0.6833694",
"0.68196535",
"0.68099296",
"0.68058705",
"0.68056655",
"0.673488",
"0.6732992",
"0.67164975",
"0.67119306"
]
| 0.72899574 | 0 |
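scoreR is the exact dual of scoreL, taking min where scoreL takes max. If the two leaf tests collapse to a single no-moves test, the pair reduces to one negamax; under that assumption scoreL(g) corresponds to negamax(g, +1) and scoreR(g) to -negamax(g, -1). A sketch, not the author's method:

```python
def negamax(node, color=+1):
    children = node.get("moves", [])
    if not children:                  # single leaf test, by assumption
        return color * node["score"]
    return max(-negamax(c, -color) for c in children)
```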
Invert the currently selected image by multiplying it by -1. | def inverse_image(model):
# get data and name from current selected image
current_row = model.currentIndex().row()
source_vol = model.data(model.index(current_row), Qt.UserRole + 6)
source_name = model.data(model.index(current_row), Qt.DisplayRole)
# inverse process
inversed_vol = imtool.inverse_transformation(source_vol)
inversed_vol_name = 'inverse_' + source_name
# save result as a new image
model.addItem(inversed_vol, None, inversed_vol_name,
model._data[0].get_header())
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invert(self, img):\n return self.inverse()(img)",
"def inv_img(img):\n return np.abs(img - 1.)",
"def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img",
"def __invert__(self):\n return self.inverse()",
"def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )",
"def inverse(self, x, y):",
"def _inv(self) -> None:\n\n self.inv(inplace=True)",
"def invert_image(image, *args, **kwargs):\n # TODO: Implement the method\n\n image2 = Image.fromarray(image)\n image3 = Image.eval(image2, lambda a: 255-a)\n inv_image = num.asarray(image3)\n\n return inv_image",
"def scale_invert(self):",
"def invert(image, name=None):\n _check_image_dtype(image)\n\n with tf.name_scope(name or \"invert\"):\n if image.dtype == tf.uint8:\n inv_image = 255 - image\n else:\n inv_image = 1. - image\n return inv_image",
"def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x",
"def inverse(self):\n return self.invert()",
"def __invert__(self):\r\n return 1 - self",
"def invert_selection(self):\n pass",
"def inverse(self) -> 'Invertible':\n raise NotImplementedError",
"def transform(self, previousimage):",
"def inv(self):\n return self.conjugate()",
"def __invert__(self):\r\n return self.__class__(self._real, -self._imag)",
"def asin(self):\r\n getcontext().prec += 2\r\n im1 = self.__class__(0, 1)\r\n arg = im1*self + (1 - self*self).sqrt1()\r\n ans = -im1 * arg.ln()\r\n getcontext().prec -= 2\r\n return +ans",
"def inverse(self):\n return self._inverse",
"def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp",
"def inv(self, y):\n pass",
"def multiplicative_inverse(self, a: 'PFElement') -> 'PFElement':\n return self(self._pf_multiplicative_inverse(a.value, self.multiplicative_group))",
"def track(self, img, index=None):\n if index is None:\n index = len(self._tracked_inverses)\n img = self.__call__(img)\n self._tracked_inverses[index] = self.inverse()\n return img",
"def inverse(self):\n return fraction(self.denom, self.num)",
"def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)",
"def inverse(self):\n if self.inv is None:\n if self.size > 0:\n self.inv = inverseIndex(self)\n else:\n self.inv = Connectivity()\n return self.inv",
"def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())",
"def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c",
"def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()"
]
| [
"0.7295463",
"0.6611736",
"0.64922965",
"0.63058925",
"0.6305444",
"0.6302544",
"0.62361765",
"0.62113416",
"0.6208909",
"0.61493313",
"0.6142045",
"0.612923",
"0.6120892",
"0.60997754",
"0.6098013",
"0.60662985",
"0.6061178",
"0.6061004",
"0.6035097",
"0.6024617",
"0.601124",
"0.59951824",
"0.5989537",
"0.596557",
"0.59640497",
"0.5956023",
"0.5897719",
"0.5894034",
"0.5878774",
"0.58780485"
]
| 0.71684796 | 1 |
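The actual negation is delegated to imtool.inverse_transformation, whose implementation is not shown; the docstring says it multiplies by -1, so a NumPy stand-in would be (an assumption, not imtool's real code):

```python
import numpy as np

def inverse_transformation(vol):
    # Assumed behaviour per the docstring: negate every voxel.
    return -1 * np.asarray(vol)

print(inverse_transformation(np.array([[0.0, 0.5], [-1.0, 2.0]])))
# [[-0.  -0.5]
#  [ 1.  -2. ]]
```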
Draw a tile from the bag. Returns None if the bag is empty; if a designated first tile exists, it is drawn first. | def draw(self):
tiles = list(self.bag.elements())
if len(tiles) == 0:
return None
if self.first_tile:
chosen_tile = self.first_tile
self.first_tile = None # done, we've drawn the first tile
else:
chosen_tile = random.choice(tiles)
self.bag[chosen_tile] -= 1
return chosen_tile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_tile(self, tile):\n raise NotImplemented()",
"def doTile(tile):\n global d, fmt, output, img, demag\n # get adjusted upper left coordinate for tile\n xstart,ystart=getCoords(tile)\n px = 256//demag\n tumor,blank=0,0\n for y in range(0,px):\n for x in range(0,px):\n curry,currx = y+ystart,x+xstart\n B,G,R = img.item(currx,curry,0),img.item(currx,curry,1),img.item(currx,curry,2)\n if B > 220 and G > 220 and R > 220:\n blank += 1\n if blank > (px**2)/2:\n print('removing %s' % tile)\n #os.remove(tile)\n return(1)\n if B < 70 and G > 180 and R < 70:\n tumor = 1\n print(\"%s tumor = %d\" % (tile,tumor))\n output.write(str(tumor)+',')\n \n blank = int(blank*2 > px**2)\n tumor = (tumor > 0)\n return(blank)",
"def get_tile(self, row, col):\n # replace with your code\n return 0",
"def draw(self,screen):\n for tile in self.tile_list:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n\n for tile in self.objList:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n # rectangle print for tiles",
"def draw_tile(tile_id):\n if tile_id == 0:\n return \" \"\n if tile_id == 1:\n return \"#\"\n if tile_id == 2:\n return \"+\"\n if tile_id == 3:\n return \"-\"\n return \"o\"",
"def draw_tile(surface, tile, x, y, size=TILE_SIZE, castle_color=None):\n tile_img = None\n if tile[0] == 'C':\n tile_img = pygame.image.load(castle_color)\n else:\n tile_img = pygame.image.load('images/' + TERRAINS[tile[0]])\n\n crown_pos = 0\n for _ in range(tile[1]):\n tile_img.blit(pygame.image.load('images/crown.png'), (crown_pos, 0))\n crown_pos += CROWN_SIZE\n\n tile_img = pygame.transform.scale(tile_img, (size, size))\n surface.blit(tile_img, (x, y))",
"def query_image_tile(self, coord):",
"def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)",
"def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True",
"def load_tile(tile):\n return pygame.image.load(tile[\"states\"][\"default\"][0])",
"def draw(self, surface):\n\n surface.fill(BLACK)\n\n for line in range(len(self.structure)):\n for sprite in range(len(self.structure[line])):\n x = sprite * SPRITE_SIZE\n y = line * SPRITE_SIZE\n if self.structure[line][sprite] == 'b': \n if line == 0 and sprite == 0: #top left corner\n self.draw_top_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line > 0 and sprite > 0 and self.structure[line - 1][sprite] == 'n' and self.structure[line][sprite - 1] == 'n': # top left corner\n self.draw_top_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == 0 and sprite == len(self.structure[line]) - 1 : # top right corner\n self.draw_top_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line > 0 and sprite < len(self.structure[line]) - 1 and self.structure[line - 1][sprite] == 'n' and self.structure[line][sprite + 1] == 'n': # top right corner\n self.draw_top_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == len(self.structure) - 1 and sprite == len(self.structure[line]) - 1 : # bottom right corner\n self.draw_bottom_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line < len(self.structure) - 1 and sprite < len(self.structure[line]) - 1 and self.structure[line + 1][sprite] == 'n' and self.structure[line][sprite + 1] == 'n': # bottom right corner\n self.draw_bottom_right_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line == len(self.structure) - 1 and sprite == 0 : # bottom left corner\n self.draw_bottom_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n elif line < len(self.structure) - 1 and sprite > 0 and self.structure[line + 1][sprite] == 'n' and self.structure[line][sprite - 1] == 'n': # bottom left corner\n self.draw_bottom_left_corner(surface, BLUE, x, y, SPRITE_SIZE)\n else:\n rect(surface, BLUE, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'n': \n rect(surface, BLACK, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'o': \n rect(surface, BLACK, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n elif self.structure[line][sprite] == 'v':\n rect(surface, GREEN, (x, y, SPRITE_SIZE, SPRITE_SIZE))\n \n for pacgum in self.pacgums:\n x = pacgum[0] * SPRITE_SIZE + SPRITE_SIZE // 2\n y = pacgum[1] * SPRITE_SIZE + SPRITE_SIZE // 2\n circle(surface, YELLOW, (x, y), SPRITE_SIZE // 5)",
"def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass",
"def getBlank(self):\n return self.tiles[-1]",
"def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True",
"def draw_missile(self):\n pygame.draw.rect(self.screen, self.color, self.rect)",
"def draw(self):\n if self.open:\n self.xpos += (200-self.xpos) * 0.1\n else:\n self.xpos += (-self.xpos) * 0.1\n\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # don't waste resources drawing the pallet if it isn't onscreen\n if self.xpos > 5:\n #draw the background for the tile pallet\n SDL_SetRenderDrawColor(self.rend,0,0,0,200)\n rect = SDL_Rect()\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-200),0,200,disph.value\n SDL_RenderFillRect(self.rend,rect)\n\n # draw edge line \n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-1),0,1,disph.value\n SDL_RenderFillRect(self.rend,rect)\n\n # draw tile previews\n for i in range(len(self.itemList.items)+1):\n # highlight selected tile\n if i-1 == self.selected:\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-185),i*150+45-self.scroll,138,138\n SDL_SetRenderDrawColor(self.rend,255,255,255,100)\n SDL_RenderFillRect(self.rend,rect)\n # draw tile preview\n rect.x, rect.y, rect.w, rect.h = round(self.xpos-180),i*150+50-self.scroll,128,128\n if i >= 1:\n for x in self.itemList.items[i-1].find('display'):\n if x.tag == 'rect':\n colors = x.find('color').text[1:-1].split(',')\n SDL_SetRenderDrawColor(self.rend,int(colors[0]),int(colors[1]),int(colors[2]),int(colors[3]) if len(colors) > 3 else 255)\n SDL_RenderFillRect(self.rend,rect)\n #SDL_RenderCopy(self.rend,self.tileSet.getTex(i),None,rect)\n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n\n # draw the file name for the tile\n quickRenderText(self.rend,self.ft_Mono16,self.itemList.items[i-1].find('name').text.strip(),rect.x,rect.y+128)\n else:\n #SDL_RenderCopy(self.rend,self.tileSet.getTex(i),None,rect)\n SDL_SetRenderDrawColor(self.rend,255,255,255,255)\n\n # draw the file name for the tile\n quickRenderText(self.rend,self.ft_Mono16,\"Edit Only\",rect.x,rect.y+128)",
"def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]",
"def __draw_tiles(self, state):\n tile_to_display_char = {\n Tile.EMPTY: ' ',\n Tile.ORB: 'o',\n Tile.TAIL: curses.ACS_BLOCK,\n }\n\n for y in range(0, self.config.arena_size[1]):\n for x in range(0, self.config.arena_size[0]):\n tile = state.arena[x][y]\n display_char = tile_to_display_char[tile]\n try:\n self.arena_win.addch(y + 1, x + 1, display_char)\n except (curses.error):\n # addch() fails at the bottom-right character because it tries\n # to scroll to a new line but no line exists. Best workaround\n # I could find.\n # https://stackoverflow.com/questions/37648557/curses-error-add-wch-returned-an-error\n pass",
"def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)",
"def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)",
"def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)",
"def top_blit(self, x, y):\n xcoord = [int(math.ceil(x)), int(math.floor(x))]\n ycoord = [int(math.ceil(y)), int(math.floor(y)), int(math.ceil(y))+1]\n for i in xcoord:\n for j in ycoord:\n if (in_range(i,j)):\n if (self.blocks[i][j].image == Tree1):\n gameDisplay.blit(Tree1Part, self.blocks[i][j].realcoordinates)\n elif (self.blocks[i][j].image == Tree2):\n gameDisplay.blit(Tree2Part, self.blocks[i][j].realcoordinates)",
"def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]",
"def draw_foreground(self):\n index = 0\n for tile in self.foreground_data:\n if tile != self.empty_tile:\n x_pos = (index * self.tile_size) % self.w\n y_pos = math.floor((index * self.tile_size) / self.w) * self.tile_size\n b = Block(tile, x_pos, y_pos)\n self.screen.entity_layer_1.add(b)\n index += 1",
"def get_tile(self, x, y):\n if x < 0 or x >= Settings.SIZE_X or y < 0 or y >= Settings.SIZE_Y:\n return MarkerType.NONE\n return self.__grid[y][x]",
"def draw_puzzle():\n # Define Baseboard\n baseboard = pygame.Rect(61, 70, 498, 498) # creates a rectangle object \n\n # Draw Baseboard\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, baseboard)\n\n tiles = GAME_PUZZLE.puzzle # fetch game puzzle\n\n gameboard = [] # mimics the puzzle_board.puzzle\n\n # define first tile position\n start_x = 62 \n start_y = 71\n\n # build a tile for each item in the game puzzle\n for i in range(0,len(tiles)):\n row = []\n for j in range(0, len(tiles[i])):\n\n if tiles[i][j] is not None: # only draw non - blank tile\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a rectangle object\n\n tile_txt = TILE_FONT.render(str(tiles[i][j]), True, TEXTCOLOR) # creates font \n\n row.append(new_tile) # add tile to row in 2d list\n\n pygame.draw.rect(RENDER_WINDOW, BUTTONCOLOR, new_tile) #draw title rectangle\n\n RENDER_WINDOW.blit(tile_txt, (new_tile.x + 40, new_tile.y + 20)) # render text centered on Tile\n else:\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a WHITE rectangle object\n row.append(new_tile)\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, new_tile) #draw title rectangle\n \n \n start_x += 166\n\n gameboard.append(row)\n start_x = 62 # reset for each row\n start_y += 166\n \n # update the global Board\n global BOARD\n BOARD = gameboard",
"def check(self):\n return self.tile==\"\"",
"def draw(self):\n if not self._move:\n px = self.get_pos_in_pixels()\n self.tile.draw(px.x, px.y, 32, 32)\n else:\n self._move.draw()",
"def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))",
"def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break"
]
| [
"0.6956463",
"0.60607404",
"0.59384507",
"0.59070325",
"0.58282965",
"0.5801085",
"0.5735737",
"0.5669857",
"0.5658397",
"0.56544286",
"0.56393045",
"0.5629337",
"0.56218994",
"0.5612351",
"0.5593023",
"0.5592965",
"0.55929166",
"0.5585017",
"0.5556688",
"0.55470335",
"0.55092096",
"0.55063397",
"0.55039805",
"0.5484077",
"0.54798937",
"0.5465255",
"0.5459281",
"0.5454284",
"0.5448087",
"0.54270667"
]
| 0.8375741 | 0 |
does tile T fit in location L? | def tile_fits(self, location, tile):
x, y = location
CONNECTIONS_TO_CHECK = [
[(x+1, y), 'east', 'west'],
[(x-1, y), 'west', 'east'],
[(x, y+1), 'north', 'south'],
[(x, y-1), 'south', 'north']
]
for neighbor_loc, my_offset, their_offset in CONNECTIONS_TO_CHECK:
neighbor_tile = self.board.get(neighbor_loc)
if neighbor_tile and tile.edges._asdict()[my_offset] != neighbor_tile.edges._asdict()[their_offset]:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"def valid(point):\n index = offset(point)\n if tiles[index] == 0:\n return False\n\n index = offset(point + 19)\n\n if tiles[index] == 0:\n return False\n\n return point.x % 20 == 0 or point.y % 20 == 0",
"def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)",
"def _check_large_tilt(self):\n large_tilt = []\n xy, xz, yz = self.tilt_factors\n x,y,_ = self.cell_lengths\n\n large_tilt.append(-x/2<xy<x/2)\n large_tilt.append(-x/2<xz<y/2)\n large_tilt.append(-x/2<yz<y/2)\n return not all(large_tilt)",
"def tile_exists(self, coords):\n\t\treturn not (\n\t\t\tcoords[0] < 0 or\n\t\t\tcoords[0] >= 25 or\n\t\t\tcoords[1] < 0 or\n\t\t\tcoords[1] >= 40)",
"def check_tile_covers_land(self, tilename=None):\n land_tiles = self.list_tiles_covering_land()\n if self.check_tilename(tilename):\n tilename = self.tilename2short(tilename)\n return tilename in land_tiles",
"def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)",
"def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False",
"def test_lat_lon_to_tile(self):\n\n lat = 48\n lon = 37.7\n z = 10\n\n tile_calculated = geomath.lat_lon_to_tile(lat,lon,z)\n tile_known = (619,355,10)\n\n # make sure the tiles are the same\n self.assertEqual(tile_calculated,tile_known)",
"def isSolvable(self):\n tiles = []\n for i in range(len(self.tiles)):\n for j in range(len(self.tiles)):\n if self.tiles[j][1] * 3 + self.tiles[j][0] + 1 == i + 1:\n tiles.append(j + 1)\n count = 0\n for i in range(len(tiles) - 1):\n for j in range(i + 1, len(tiles)):\n if tiles[i] > tiles[j] and tiles[i] != 9:\n count += 1\n return count % 2 == 0 and count != 0",
"def tettile(board: Board, tiles: List[Tile]) -> Union[List[Tuple[Tuple[int, int], Tile]], bool]:\n solution = []\n tiles_used = set()\n for i, tile in enumerate(tiles):\n if tile.type in tiles_used: # Prevent us from trying the same failed piece over and over\n continue\n tiles_used |= set(tile.type) # Add the current tile to the set of used tiles\n for j in range(tile.num_orientations):\n # Find the most northwestern possible tile position (or False if there isn't one)\n position = board.tile_can_be_placed(tile)\n if type(position) == np.ndarray:\n # If there's a position, place the tile\n board.place_tile(tile, position)\n # It might be the case that the placed tile partitioned the board such that\n # there is at least one partition that doesn't have a multiple of 4 cells\n if not board.is_valid():\n # If that's the case, short circuit the search of this branch\n board.remove_tile(tile, position)\n tile.rotate()\n continue\n # Otherwise, append the tile to the list of possible solutions\n solution.append((position, tile))\n if board.is_solved():\n # If the board is now solved, return the solution so it can bubble up\n return solution\n # If the board is not solved, call the function recursively, slicing out the current\n # tile from the list of tiles passed into the recursive call\n result = tettile(board, tiles[:i] + tiles[i + 1:])\n if board.is_solved():\n # If the recursion found a solution, append it to the solution that contains\n # the current tile and return that to bubble it up\n return solution + result\n else:\n # If the recursion did not find a solution, remove the current tile from the list\n # of solutions and from the board.\n board.remove_tile(tile, position)\n solution.pop()\n # Make sure to try all orientations of a piece at the given location\n tile.rotate()\n # Return the empty list if no solution was found\n return solution",
"def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1",
"def tile_exists_utm(boundsSrc, boundsTile):\n\n\n boundsSrcBox = box(*boundsSrc)\n boundsTileBox = box(*boundsTile)\n\n return boundsSrcBox.intersects(boundsTileBox)",
"def calculate_min_max_tiles(self):",
"def update_tile(tile, color, tiles):\n if color == BLACK:\n return num_black_neighbors(tile, tiles) in [1, 2]\n if color == WHITE:\n return num_black_neighbors(tile, tiles) == 2",
"def check_tile_availability(self, row, col):\n return self.board[row][col] == 0",
"def LongitudinalBounds(lng, num_tiles):\n # Normalize to between -180 and 180 degrees longitude.\n while lng < -180.0:\n lng += 360.0\n while lng >= 180.0:\n lng -= 360.0\n\n degrees_per_tile = 360.0 / num_tiles\n x = int((lng + 180.0) / degrees_per_tile)\n west = x * degrees_per_tile - 180.0\n return (west, west + degrees_per_tile)",
"def tile_checker(stage_tiles,\n player_new):\n tile = stage_tiles.get(\"{0},{1}\".format(player_new[0], player_new[1]), \"ocean\")\n # Check each possible terrain\n if tile == \"rock\" or tile == \"mountain\":\n valid = False\n color.write(\"You can't move into a {}!\\n\".format(tile),\"ERROR\")\n else:\n valid = True\n\n return valid",
"def compare_tile(t1, t2):\n matches = 0\n\n t1pos = get_all_perms(t1)\n t2pos = get_all_perms(t2)\n\n for t1 in t1pos:\n for t2 in t2pos:\n if t1.tolist() == t2.tolist():\n matches += 1\n\n return matches",
"def tileExists(x, y):\n return _world.get((x, y))",
"def query_image_tile(self, coord):",
"def check_if_double(tile: list):\n return tile[0] == tile[1]",
"def tileOccupied(self, i, j):\n if self.tiles[i][j] == 1 or i == 0 or i == self.size[0] - 1 or j == 0 or j == self.size[1] - 1:\n return True\n for prop in self.props:\n if prop.i == i and prop.j == j:\n return True\n return False",
"def __isTileWall(self, point):\n return self.__getElementFromPairs(point) == \"-\"",
"def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height",
"def find_tile(self, neighbour_list):\n if neighbour_list[1] == 'tile':\n if neighbour_list[2] == 'trap':\n if neighbour_list[0] == 'north':\n self.lab.disarm('north')\n print 'Disarm north trap'\n elif neighbour_list[0] == 'south':\n self.lab.disarm('south')\n print 'Disarm south trap'\n elif neighbour_list[0] == 'west':\n self.lab.disarm('west')\n print 'Disarm west trap'\n elif neighbour_list[0] == 'east':\n self.lab.disarm('east')\n print 'Disarm east trap'\n return True\n else:\n if neighbour_list[1] == 'toby':\n if neighbour_list[0] == 'north':\n self.moveNorth()\n elif neighbour_list[0] == 'south':\n self.moveSouth()\n elif neighbour_list[0] == 'west':\n self.moveWest()\n elif neighbour_list[0] == 'east':\n self.moveEast()\n return False",
"def get_tile(self, row, col):\n # replace with your code\n return 0",
"def wid_in(self, wid):\n \n for row in self.tiles:\n if wid in row:\n return True\n return False",
"def isPositionInRoom(self, pos):\n if pos in self.tiles:\n return True\n else:\n return False"
]
| [
"0.6746748",
"0.67260736",
"0.6515046",
"0.6514588",
"0.6514078",
"0.6365492",
"0.62904865",
"0.6278291",
"0.62414855",
"0.62305176",
"0.61929804",
"0.6121494",
"0.6096097",
"0.6079293",
"0.6074842",
"0.60705346",
"0.6037277",
"0.6031438",
"0.5947393",
"0.59265006",
"0.5915454",
"0.59101444",
"0.59071404",
"0.59009695",
"0.58914554",
"0.58891845",
"0.5868416",
"0.58329916",
"0.58316207",
"0.5831545"
]
| 0.6806946 | 0 |
Lookup the 'all' and 'all_count' views from the metadata or construct defaults. | def get_views(view_args, model_type):
# XXX Why pop?
metadata = view_args.pop('metadata')
all_view = metadata.get('views', {}).get('all')
if not all_view:
all_view = '%s/all' % model_type
all_count_view = metadata.get('views', {}).get('all_count')
if not all_count_view:
all_count_view = '%s/all_count' % model_type
return all_view, all_count_view | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_default_views(context, data_dict):\n # noinspection PyUnresolvedReferences\n frc = _get_or_bust(data_dict, 'frc')\n\n lc = ckanapi.LocalCKAN(context=context)\n\n cube_results = lc.action.package_search(\n q=(\n 'type:cube AND frc:{frc}'\n ).format(\n frc=frc\n ),\n )\n\n final_results = []\n\n for cube_result in cube_results.get('results') or []:\n if not cube_result.get('default_view_id'):\n # We don't care about cubes that have no default_view_id,\n # which may occur.\n continue\n\n view_results = lc.action.package_search(\n q=(\n 'type:view AND product_id_new:{view_id} AND '\n '-discontinued_code:1'\n ).format(\n view_id=cube_result['default_view_id']\n ),\n rows=1\n )\n\n if not view_results['count']:\n continue\n\n view = view_results['results'][0]\n\n final_results.append({\n u'cube': {\n u'frequency': cube_result.get('frequency_codes') or []\n },\n u'view': {\n u'title': view['title'],\n u'id': view['product_id_new']\n }\n })\n\n return final_results",
"def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)",
"def documents(self, schema=None, wrapper=None, **params):\n return ViewResults(self.raw_view, '_all_docs',\n wrapper=wrapper, schema=schema, params=params)",
"def _get_view_args(self, all_args):\n view_args = dict((k, v) for k,v in all_args.iteritems() if k in ('descending', 'stale', 'skip', 'inclusive_end', 'update_seq'))\n limit = int(all_args.get('limit', 0))\n if limit>0:\n view_args['limit'] = limit\n return view_args",
"def getViews(read):\n ...",
"def get_all(self, start=0, count=-1, sort='', query='', view=''):\n return self._client.get_all(start, count, sort=sort, query=query, view=view)",
"def data_for_all(request):\n data = common_data(request)\n data.update({\"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()})\n return data",
"def overview(cls, queryset, *annotations):\n if select_related := cls.select_related:\n queryset = queryset.select_related(*select_related)\n if prefetch_related := cls.prefetch_related:\n queryset = queryset.prefetch_related(*prefetch_related)\n if all_annotations := cls.get_overview_annotations():\n if annotations:\n _annotations = {k: v for k, v in all_annotations.items() if k in annotations}\n else:\n _annotations = all_annotations\n queryset = queryset.annotate(**_annotations)\n return queryset",
"def index_queryset(self, using=None):\n return self.get_model()._default_manager.all()",
"def get_views(self):\n return self._get_types_from_default_ns(View)",
"def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n context['questions'] = models.Question.objects.count()\n context['answers'] = models.Answer.objects.count()\n context['users'] = User.objects.count()\n return context",
"def get_queryset(self): # pylint: disable=arguments-differ\n qs = super(AbstractDocumentSummaryViewset, self).get_queryset()\n return qs.only(*self.query_fields)",
"def get_count_and_total_count(self, queryset, view):\n count = queryset.count() # replace count by real count - not only drf-datatables count\n if hasattr(view, '_datatables_total_count'):\n total_count = view._datatables_total_count\n del view._datatables_total_count\n else: # pragma: no cover\n total_count = count\n return count, total_count",
"def toall_get(self, request):\n _view = _object_view(self, request)\n queried = ToAllChannelPostings(request.params.mixed()).query()\n objs = [request.view(obj) for obj in queried[0]]\n _view.update({\n \"postings\": objs,\n \"result_complete\": queried[1]\n })\n return _view",
"def load_all_queryset(self):\n return self.get_model()._default_manager.all()",
"def get_all_summaries() -> Dict[str, CBSummary]:\n return _SUMMARIES",
"def page_views(self, *args, **kwargs):\r\n return self._get('PageViews', *args, **kwargs)",
"def read_all(self, *args, **kwargs):\n pass",
"def collection_get(self):\n if self.request.params.get(\"all\", \"\"):\n collection_data = [i.serialize(\"view\") for i in self.context.documents]\n else:\n collection_data = sorted(\n dict([(i.id, i.serialize(\"view\")) for i in self.context.documents]).values(),\n key=lambda i: i[\"dateModified\"],\n )\n return {\"data\": collection_data}",
"def index(request):\n\n # Generate counts of some of the main objects\n num_posts = Post.objects.all().count()\n num_category = Category.objects.all().count()\n \n # The 'all()' is implied by default. \n num_authors = Author.objects.count()\n \n context = {\n 'num_posts': num_posts,\n 'num_category': num_category,\n 'num_authors': num_authors,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def all_common_variables(request):\n articles = Article.objects.all()\n random_article = Article.objects.order_by('?')[0:4]\n return {\n 'articles':articles,\n 'random_article':random_article,\n }",
"def usage_for_queryset(self, queryset, counts=False, min_count=None):\r\n if parse_lookup:\r\n raise AttributeError(\"'TagManager.usage_for_queryset' is not compatible with pre-queryset-refactor versions of Django.\")\r\n\r\n extra_joins = ' '.join(queryset.query.get_from_clause()[0][1:])\r\n where, params = queryset.query.where.as_sql()\r\n if where:\r\n extra_criteria = 'AND %s' % where\r\n else:\r\n extra_criteria = ''\r\n return self._get_usage(queryset.model, counts, min_count, extra_joins, extra_criteria, params)",
"def initView(self):\n return {}",
"def usage_for_queryset(self, queryset, counts=False, min_count=None, extra=None):\n if parse_lookup:\n raise AttributeError(\"'TagManager.usage_for_queryset' is not compatible with pre-queryset-refactor versions of Django.\")\n\n extra_joins = ' '.join(queryset.query.get_from_clause()[0][1:])\n where, params = queryset.query.where.as_sql()\n if where:\n extra_criteria = 'AND %s' % where\n else:\n extra_criteria = ''\n return self._get_usage(queryset.model, counts, min_count, extra_joins, extra_criteria, params, extra)",
"def get_default_responses(self):\n method = self.method.lower()\n\n default_status = status.HTTP_200_OK\n default_schema = ''\n if method == 'post':\n default_status = status.HTTP_201_CREATED\n default_schema = self.get_request_serializer() or self.get_view_serializer()\n elif method == 'delete':\n default_status = status.HTTP_204_NO_CONTENT\n elif method in ('get', 'put', 'patch'):\n default_schema = self.get_request_serializer() or self.get_view_serializer()\n\n default_schema = default_schema or ''\n if any(is_form_media_type(encoding) for encoding in self.get_consumes()):\n default_schema = ''\n if default_schema:\n if not isinstance(default_schema, openapi.Schema):\n default_schema = self.serializer_to_schema(default_schema)\n if is_list_view(self.path, self.method, self.view) and self.method.lower() == 'get':\n default_schema = openapi.Schema(type=openapi.TYPE_ARRAY, items=default_schema)\n if self.should_page():\n default_schema = self.get_paged_response_schema(default_schema)\n\n return {str(default_status): default_schema}",
"def get_compute_statistics(cls, all_tensors):\n flags = {\n \"total\": {\"count\": 0, \"tensors\": []},\n None: {\"count\": 0, \"tensors\": []},\n \"unknown\": {\"count\": 0, \"tensors\": []},\n }\n for index in OpFlags:\n index = index.value\n flags[index] = {\"count\": 0, \"tensors\": []}\n for tensor in all_tensors:\n flag = tensor.op.tag\n cls.count_flag(flags, flag, tensor)\n return flags",
"def usage_for_model(self, model, counts=False, min_count=None, filters=None, extra=None):\n if extra is None: extra = {}\n if filters is None: filters = {}\n\n if not parse_lookup:\n # post-queryset-refactor (hand off to usage_for_queryset)\n queryset = model._default_manager.filter()\n for f in filters.items():\n queryset.query.add_filter(f)\n usage = self.usage_for_queryset(queryset, counts, min_count, extra)\n else:\n # pre-queryset-refactor\n extra_joins = ''\n extra_criteria = ''\n params = []\n if len(filters) > 0:\n joins, where, params = parse_lookup(filters.items(), model._meta)\n extra_joins = ' '.join(['%s %s AS %s ON %s' % (join_type, table, alias, condition)\n for (alias, (table, join_type, condition)) in joins.items()])\n extra_criteria = 'AND %s' % (' AND '.join(where))\n usage = self._get_usage(model, counts, min_count, extra_joins, extra_criteria, params, extra)\n\n return usage",
"def index_queryset(self, using=None):\n return self.get_model().objects.all()",
"def index_queryset(self, using=None):\n return self.get_model().objects.all()"
]
| [
"0.5791284",
"0.5705834",
"0.5399181",
"0.53550243",
"0.5300899",
"0.5277965",
"0.5185668",
"0.51750314",
"0.514737",
"0.5087578",
"0.50462204",
"0.49970847",
"0.4987755",
"0.49721837",
"0.49630272",
"0.48864326",
"0.48848885",
"0.48412365",
"0.48411632",
"0.4757586",
"0.47511986",
"0.47420377",
"0.47223866",
"0.47065568",
"0.4698542",
"0.46908984",
"0.469065",
"0.46887162",
"0.46875495",
"0.46875495"
]
| 0.6535335 | 0 |
Return a flash message box element. | def flash_message(self, request):
return FlashMessagesElement() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getMessageBox(self, id):\n try:\n messageBoxWrapUp = self._getMessageBoxCompactWrapUp(id)\n\n return messageBoxWrapUp.messageBox\n except:\n return None",
"def _flash(self,id,msg,duration=30.0):\n if duration>0:\n pass #gtk.timeout_add(duration,'')\n return self.statusbar.push(id,msg)",
"def get_flash():\n key = settings.FLASH_COOKIE_NAME\n data = getattr(local, 'flash_message', None)\n if data is None:\n if key in local.request.cookies:\n data = local.request.cookies[key]\n local.flash_message = None\n if data:\n return pickle.loads(b64decode(data))\n return u''",
"def message(self,message,style=wx.OK | wx.ICON_INFORMATION):\n dlg = wx.MessageDialog(self, message, self.app.title, style)\n answer = dlg.ShowModal()\n dlg.Destroy()\n return answer",
"def message(kind: str, **options: str):\n\n #disable parent placement since root_deco automates this\n options.pop('parent', None)\n return getattr(tkmsgbox, kind)(**options)",
"def FailureMessage(self, message, caption):\n wx.MessageBox(message, caption, style=wx.OK|wx.ICON_ERROR)",
"def display_message():",
"def flash(text, type=INFO):\n flash_message['text'] = text\n flash_message['type'] = type",
"def show_message(request):\n return render_to_response('woodstock/messages/view.html', {},\n context_instance = RequestContext(request))",
"def display_message(window, msg):\n v = create_output_panel(window, '')\n _append(v, msg)",
"def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)",
"def popup_error(msg: str):\n popup = tkinter.Tk()\n popup.wm_title(\"An Error has Occurred\")\n label = tkinter.Label(popup, text=msg)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n b1 = tkinter.Button(popup, text=\"Okay\", command=popup.destroy)\n b1.pack(pady=10)\n popup.mainloop()",
"def showMessage(self):",
"def MessageDialog( message, caption, style ):\n dlg = wx.MessageDialog( wx.GetApp().GetTopWindow(), message, caption, style )\n result = dlg.ShowModal()\n dlg.Destroy()\n \n return result",
"def read_message_box(timeout=default_timeout):\n start_time = time.time()\n while time.time() - start_time <= timeout:\n try:\n if _is_element_present(controls['Message Box']['Message']):\n return _get_text(controls['Message Box']['Message'])\n except:\n continue\n else:\n logger.warning(\"Could not get text of message box\")\n return None",
"def bootstrap_messages(context, *args, **kwargs):\n return get_template('bootstrap3/messages.html').render(context)",
"def message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n root.withdraw()\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass",
"def showmessage(self):\n return self.message",
"def showmessage(self):\n return self.message",
"def _flash(self):\n return self.response.context[CONTEXT_VAR]",
"def popErrorMessage(self, message):\n\n\t\tmessagebox.showinfo(\"Warning\", message, parent = self)",
"def popUpMessage(message):\n pop_up = Tk()\n pop_up.wm_title(\"File Error\")\n label = Label( pop_up, text = message, font = APP_FONT + \" 12\")\n label.pack( side=\"top\", fill=\"x\", pady=20 )\n button = Button(pop_up, text=\"Okay\", command = pop_up.destroy, font = APP_FONT + \" 12 bold\", bg = \"red\", fg=\"white\", borderwidth=6,)\n button.pack()\n pop_up.mainloop()",
"def modeMsgBox(self, messageText):\n self.createMessage(messageText)",
"def showmessage(parent, message, title, flags = wx.OK):\n\tdlg = wx.MessageDialog(parent, message, title, flags)\n\tdlg.ShowModal()\n\tdlg.Destroy()",
"def display_message(self, message):\n params = {\n 'message': message\n }\n self.render_template('message.html', params)",
"def make_message_box(self):\n box_image = setup.GFX['dialoguebox']\n box_rect = box_image.get_rect()\n text = 'You have died. Restart from last save point?'\n text_render = self.font.render(text, True, c.NEAR_BLACK) \n text_rect = text_render.get_rect(centerx=box_rect.centerx,\n y=30)\n text2 = 'Yes'\n text2_render = self.font.render(text2, True, c.NEAR_BLACK)\n text2_rect = text2_render.get_rect(centerx=box_rect.centerx,\n y=70)\n\n text3 = 'No'\n text3_render = self.font.render(text3, True, c.NEAR_BLACK)\n text3_rect = text3_render.get_rect(centerx=box_rect.centerx,\n y=105)\n\n temp_surf = pg.Surface(box_rect.size)\n temp_surf.set_colorkey(c.BLACK)\n temp_surf.blit(box_image, box_rect)\n temp_surf.blit(text_render, text_rect)\n temp_surf.blit(text2_render, text2_rect)\n temp_surf.blit(text3_render, text3_rect)\n \n box_sprite = pg.sprite.Sprite()\n box_sprite.image = temp_surf\n box_sprite.rect = temp_surf.get_rect(bottom=608)\n \n return box_sprite",
"def warning(self, message):\n msg_dlg = wx.MessageDialog(None, message, '', wx.OK | wx.CANCEL| wx.ICON_ERROR)\n val = msg_dlg.ShowModal()\n msg_dlg.Show()\n msg_dlg.Destroy()\n return val",
"def flash_msg(self, params):\n if params.has_key('receiver'): name = params['receiver']\n else: \n if self.participant: \n group = self.service.groupOfParticipant(self.participant)\n if group: \n member_avail = filter(lambda x:x.status == LISTEN and x.name != self.name,group.members)\n if member_avail:\n member = member_avail.pop()\n name = member.name\n else:\n self.notLoggedIn()\n return\n if params.has_key('text'): text = params['text']\n else: return\n\n logger.writeLog(\"%s@%s said:'%s'\" % (self.name,self.transport.hostname,text))\n \n if self.participant:\n msgMethod = self.participant.directMessage\n try:\n self.service.sendParticipants(self.name,\"botmsg\",{\"text\":text,\"sender\":self.name})\n msgMethod(name,text)\n except:\n self.receiveDirectCommand(\"msg\",{\"sender\":\"MsgServ\",\"text\":\"cant send text, probably there is no user to listen\"})\n else:\n self.notLoggedIn()",
"def display_message(self, message):\n\t\tself.render('message.html', {'message': message})",
"def alert_message(self):\r\n alerts = self.q(css=\"div.open-ended-alert\").text\r\n\r\n if len(alerts) < 1:\r\n return \"\"\r\n else:\r\n return alerts[0]"
]
| [
"0.6299474",
"0.6288294",
"0.61859554",
"0.61429226",
"0.60550874",
"0.60471123",
"0.5999118",
"0.59611005",
"0.58801323",
"0.58488834",
"0.58245665",
"0.5819826",
"0.5815885",
"0.58019423",
"0.57987386",
"0.57850605",
"0.5783196",
"0.5781271",
"0.5781271",
"0.5745172",
"0.5744404",
"0.57343394",
"0.55993474",
"0.5596065",
"0.55762786",
"0.5567825",
"0.5557855",
"0.5540118",
"0.5506961",
"0.54694587"
]
| 0.8044592 | 1 |
Confirm that the src and dest docs match in terms of id and rev, raising an HTTP exception on failure. A BadRequestError is raised if the ids do not match. A ConflictError is raised if the revs do not match. | def confirm_doc_and_rev(src, dest):
if src['_id'] != dest['_id']:
raise http.BadRequestError('incorrect id')
if src['_rev'] != dest['_rev']:
raise http.ConflictError([('Content-Type', 'text/plain')], 'rev is out of date') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_revision(self, request):\n assert hasattr(self, 'doc'), \"dispatcher document must be set\"\n try:\n rev = request.cgi_fields['_rev'].value\n except KeyError:\n return\n if rev != self.doc.rev:\n raise HTTP_CONFLICT(\"Your edit was based on an entity document that\"\n \" was changed by someone else after you loaded\"\n \" the edit page; the document revisions do\"\n \" not match. Go back to the entity ('Cancel')\"\n \" and retry your edit...\")",
"def test_with_draft_diff(self):\n repository = self.create_repository(tool_name='Test')\n review_request = self.create_review_request(\n repository=repository,\n submitter=self.user,\n publish=True)\n diffset = self.create_diffset(review_request, draft=True)\n filediff = self.create_filediff(diffset)\n\n rsp = self.api_get(\n get_original_file_url(review_request, diffset, filediff),\n expected_status=404)\n self.assertEqual(rsp['stat'], 'fail')\n self.assertEqual(rsp['err']['code'], DOES_NOT_EXIST.code)",
"def test_one_revision_created(self):\n with factories.single_commit():\n source = factories.ProgramFactory()\n destination = factories.ObjectiveFactory()\n\n data = [{\n \"relationship\": {\n \"context\": None,\n \"destination\": {\n \"id\": source.id,\n \"type\": \"Program\",\n \"href\": \"/api/programs/{}\".format(source.id)\n },\n \"source\": {\n \"id\": destination.id,\n \"type\": \"Objective\",\n \"href\": \"/api/objectives/{}\".format(destination.id)\n }\n }\n }]\n response = self.api.client.post(\n \"/api/relationships\",\n data=json.dumps(data),\n headers=self.headers\n )\n self.assert200(response)\n rel_id = all_models.Relationship.query.one().id\n revs_count = all_models.Revision.query.filter_by(\n source_type=\"Objective\", destination_type=\"Program\"\n ).count()\n events_count = all_models.Event.query.filter_by(\n resource_id=rel_id, resource_type=\"Relationship\",\n ).count()\n self.assertEqual(revs_count, 1)\n self.assertEqual(events_count, 1)\n\n response = self.api.client.post(\n \"/api/relationships\",\n data=json.dumps(data),\n headers=self.headers\n )\n self.assert200(response)\n new_revs_count = all_models.Revision.query.filter_by(\n source_type=\"Objective\", destination_type=\"Program\"\n ).count()\n events_count = all_models.Event.query.filter_by(\n resource_id=rel_id, resource_type=\"Relationship\",\n ).count()\n self.assertEqual(new_revs_count, 1)\n self.assertEqual(events_count, 1)",
"def _matcher(r1: vcr.request.Request, r2: vcr.request.Request) -> None:\n assert r1.uri == r2.uri and r1.body == r2.body and r1.headers == r2.headers",
"def _check_regr(self, regr, new_reg):\n body = getattr(new_reg, 'body', new_reg)\n for k, v in body.items():\n if k == 'resource' or not v:\n continue\n if regr.body[k] != v:\n raise errors.UnexpectedUpdate(regr)\n if regr.body.key != self.key.public_key():\n raise errors.UnexpectedUpdate(regr)\n return regr",
"def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)",
"def assert_equal_resource(res1, res2):\n assert isinstance(res1, FakedBaseResource)\n assert isinstance(res2, FakedBaseResource)\n assert res1.uri == res2.uri\n assert res1.oid == res2.oid\n names1 = set(res1.properties.keys())\n names2 = set(res2.properties.keys())\n if names1 != names2:\n raise AssertionError(\n \"Resources do not have the same set of properties:\\n\"\n \"- res1 names: {}\\n\"\n \"- res2 names: {}\\n\".\n format(names1, names2))\n for name in res1.properties:\n value1 = res1.properties[name]\n value2 = res2.properties[name]\n if value1 != value2:\n raise AssertionError(\n \"Resources do not have the same value for property {}:\\n\"\n \"- res1 value: {}\\n\"\n \"- res2 value: {}\\n\".\n format(name, value1, value2))",
"def fail_on_unacknowledged_changes(args):\n if not filecmp.cmp(args.golden, args.current):\n return GoldenMismatchError(\n api_level=args.api_level,\n current=args.current,\n golden=args.golden,\n show_update_hint=True,\n )\n return None",
"def ddl_compare(self, **kwargs):\n\n source = ''\n target = ''\n diff = ''\n comp_status = kwargs.get('comp_status')\n only_diff = False\n generate_script = False\n\n source_params = {'gid': 1,\n 'sid': kwargs.get('source_sid'),\n 'did': kwargs.get('source_did'),\n 'scid': kwargs.get('source_scid'),\n 'oid': kwargs.get('source_oid')\n }\n\n target_params = {'gid': 1,\n 'sid': kwargs.get('target_sid'),\n 'did': kwargs.get('target_did'),\n 'scid': kwargs.get('target_scid'),\n 'oid': kwargs.get('target_oid')\n }\n\n if 'source_tid' in kwargs:\n source_params['tid'] = kwargs['source_tid']\n only_diff = True\n if 'target_tid' in kwargs:\n target_params['tid'] = kwargs['target_tid']\n only_diff = True\n\n if 'generate_script' in kwargs and kwargs['generate_script']:\n generate_script = True\n\n source_params_adv = copy.deepcopy(source_params)\n target_params_adv = copy.deepcopy(target_params)\n\n del source_params_adv['gid']\n del target_params_adv['gid']\n\n status, target_schema = self.get_schema(kwargs.get('target_sid'),\n kwargs.get('target_did'),\n kwargs.get('target_scid')\n )\n if not status:\n return internal_server_error(errormsg=target_schema)\n\n if comp_status == SchemaDiffModel.COMPARISON_STATUS['source_only']:\n if not generate_script:\n source = self.get_sql_from_diff(**source_params)\n source_params.update({\n 'diff_schema': target_schema\n })\n diff = self.get_sql_from_diff(**source_params)\n\n elif comp_status == SchemaDiffModel.COMPARISON_STATUS['target_only']:\n if not generate_script:\n target = self.get_sql_from_diff(**target_params)\n target_params.update(\n {'drop_sql': True})\n diff = self.get_sql_from_diff(**target_params)\n\n elif comp_status == SchemaDiffModel.COMPARISON_STATUS['different']:\n source = self.fetch_objects_to_compare(**source_params_adv)\n target = self.fetch_objects_to_compare(**target_params_adv)\n\n if not (source or target):\n return None\n\n diff_dict = directory_diff(source,\n target,\n ignore_keys=self.keys_to_ignore,\n difference={}\n )\n\n diff_dict.update(self.parce_acl(source, target))\n\n if not generate_script:\n source = self.get_sql_from_diff(**source_params)\n target = self.get_sql_from_diff(**target_params)\n\n target_params.update(\n {'data': diff_dict})\n diff = self.get_sql_from_diff(**target_params)\n else:\n source = self.get_sql_from_diff(**source_params)\n target = self.get_sql_from_diff(**target_params)\n\n if only_diff:\n return diff\n\n return {'source_ddl': source,\n 'target_ddl': target,\n 'diff_ddl': diff\n }",
"def compare_status(\n src: \"ObjectDB\",\n dest: \"ObjectDB\",\n objs: Iterable[\"HashFile\"],\n log_missing: bool = True,\n check_deleted: bool = True,\n src_index: Optional[\"ObjectDBIndexBase\"] = None,\n dest_index: Optional[\"ObjectDBIndexBase\"] = None,\n **kwargs,\n) -> \"CompareStatusResult\":\n dest_exists, dest_missing = status(dest, objs, index=dest_index, **kwargs)\n # for transfer operations we can skip src status check when all objects\n # already exist in dest\n if dest_missing or check_deleted:\n src_exists, src_missing = status(src, objs, index=src_index, **kwargs)\n else:\n src_exists = dest_exists\n src_missing = set()\n result = CompareStatusResult(\n src_exists & dest_exists,\n src_missing & dest_missing,\n src_exists - dest_exists,\n dest_exists - src_exists,\n )\n if log_missing and result.missing:\n missing_desc = \"\\n\".join(\n f\"name: {obj.name}, {obj.hash_info}\" for obj in result.missing\n )\n logger.warning(\n \"Some of the cache files do not exist neither locally \"\n f\"nor on remote. Missing cache files:\\n{missing_desc}\"\n )\n return result",
"def conflict():\n return HttpError(409)",
"def test_review_story_invalid_status(self):\n self.client.post('/api/stories', headers={'token': user_token}, data=json.dumps(story1))\n res = self.client.put('/api/stories/1/review', headers={'token': admin_token}, data=json.dumps({\n 'status': 'Invalid'\n }))\n result = json.loads(res.data.decode())\n self.assertEqual(result['message'], 'Invalid status')\n self.assertEqual(res.status_code, 400)",
"def test_releaseresourcesrequest_object_equality():\n constructor_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/2.0\",\n subarray_id=1,\n release_all=True,\n )\n request = ReleaseResourcesRequest(**constructor_args)\n\n # objects with same property values are considered equal\n other = ReleaseResourcesRequest(**constructor_args)\n assert request == other\n\n # objects where any property differs are considered unequal\n different_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/999.0\",\n subarray_id=2,\n release_all=False,\n )\n for k, v in different_args.items():\n other_args = dict(constructor_args)\n other_args[k] = v\n assert request != ReleaseResourcesRequest(**other_args)",
"def compare(src, dest):\n xsrc, xdest = os.path.exists(src), os.path.exists(dest)\n if not xsrc:\n return Cmp.nosrc\n if not xdest:\n return Cmp.nodest\n with open(src, \"rb\") as s:\n csrc = sha256(s.read()).digest()\n if xdest:\n with open(dest, \"rb\") as d:\n cdest = sha256(d.read()).digest()\n else:\n cdest = b\"\"\n if csrc == cdest:\n return Cmp.same\n return Cmp.differ",
"def assertHttpSeeOther(self, resp):\r\n return self.assertEqual(resp.status_code, 303)",
"def getMissingRevisionsDiff(self, docId, docRevs):\n return self.client.post(self.name +\"/_revs_diff\", None,\n {docId: docRevs}).getBodyData()",
"def test_put_review_detail_fail(self):\n client = Client()\n review1_id = Review.objects.get(content='TEST_CONTENT').id\n review2_id = Review.objects.get(content='TEST_CONTENT2').id\n review3_id = Review.objects.get(content='TEST_CONTENT3').id\n review4_id = Review.objects.get(content='TEST_CONTENT4').id\n no_review_id = review1_id + review2_id + review3_id + review4_id\n response = client.put('/api/review/'+str(review1_id)+'/', {\n 'content': 'TEST_PUT_CONTENT',\n 'restaurant_name': 'TEST_REST',\n 'menu_name': 'TEST_MENU',\n 'rating': 3\n }, 'application/json')\n self.assertEqual(response.status_code, 401)\n client.login(username='TEST_USER_2',\n email='TEST_EMAIL_2', password='TEST_PW_2')\n response = client.put('/api/review/'+str(review1_id)+'/', {\n 'content': 'TEST_PUT_CONTENT',\n 'restaurant_name': 'TEST_REST',\n 'menu_name': 'TEST_MENU',\n 'rating': 3\n }, 'application/json')\n self.assertEqual(response.status_code, 403)\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.put('/api/review/'+str(review1_id)+'/', {\n 'content': 'TEST_PUT_CONTENT',\n 'restaurant_name': 'TEST_REST',\n 'menu_name': 'TEST_MENU',\n 'rating': 3\n })\n self.assertEqual(response.status_code, 400)\n response = client.put('/api/review/'+str(no_review_id)+'/', {\n 'content': 'TEST_PUT_CONTENT',\n 'restaurant_name': 'TEST_REST',\n 'menu_name': 'TEST_MENU',\n 'rating': 3\n }, 'application/json')\n self.assertEqual(response.status_code, 404)\n response = client.put('/api/review/'+str(review1_id)+'/', json.dumps({\n 'content': 'TEST_PUT_CONTENT',\n 'restaurant_name': 'TEST_REST_N',\n 'menu_name': 'TEST_MENU_N',\n 'rating': 3,\n 'category': 'NEW_TEST_CATEGORY'\n }), 'application/json')\n self.assertEqual(response.status_code, 400)",
"def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply",
"def differences(actual, expected, f_actual, f_expected):\n if not actual.equals(expected):\n msg = \"COMPARE RESULTS DIFFER\\n\"\n msg += \"-------------------------------------------------\\n\"\n msg += \"--- NEW RESULTS IN {} FILE ---\\n\"\n msg += \"--- if new OK, copy {} to ---\\n\"\n msg += \"--- {} ---\\n\"\n msg += \"--- and rerun test. ---\\n\"\n msg += \"-------------------------------------------------\\n\"\n raise ValueError(msg.format(f_actual, f_actual, f_expected))",
"def testRef(self):\n self.assertEqual(\n 'uniqueId',\n self.ccr.id\n )\n\n self.assertEqual(\n 'oldId',\n self.ccr_bad.id\n )",
"def test_revision_diff_delete_then_rollback(self):\n payload = base.DocumentFixture.get_minimal_fixture()\n bucket_name = test_utils.rand_name('bucket')\n created_documents = self.create_documents(bucket_name, payload)\n revision_id = created_documents[0]['revision_id']\n\n # Delete all previously created documents.\n deleted_documents = self.create_documents(bucket_name, [])\n comparison_revision_id = deleted_documents[0]['revision_id']\n\n # Validate that the empty bucket is deleted.\n self._verify_buckets_status(\n revision_id, comparison_revision_id, {bucket_name: 'deleted'})\n\n # Rollback to first non-empty revision.\n rollback_revision_id = self.rollback_revision(revision_id)['id']\n # Validate that diffing rolled-back revision against 1 is unmodified.\n self._verify_buckets_status(\n revision_id, rollback_revision_id, {bucket_name: 'unmodified'})\n\n # Validate that diffing rolled-back revision against 2 is created\n # (because the rolled-back revision is newer than revision 2).\n self._verify_buckets_status(\n comparison_revision_id, rollback_revision_id,\n {bucket_name: 'created'})",
"def test_parse_diff_revision_with_remote_and_short_SHA1_error(self):\n with self.assertRaises(ShortSHA1Error):\n self.remote_tool.parse_diff_revision(filename=b'README',\n revision=b'd7e96b3')",
"def check(src, perm, dest, cmds, comp, verbose=False):\n if comp == Cmp.differ:\n ansiprint(f\"The file '{src}' differs from '{dest}'.\", fg=Color.red, i=True)\n elif comp == Cmp.nodest:\n ansiprint(\n f\"The destination file '{dest}' does not exist\",\n fg=Color.black,\n bg=Color.red,\n )\n elif comp == Cmp.nosrc:\n ansiprint(\n f\"The source file '{src}' does not exist.\", fg=Color.black, bg=Color.red\n )\n elif comp == Cmp.same and verbose:\n ansiprint(f\"The files '{src}' and '{dest}' are the same.\", fg=Color.green)",
"def test_releaseresourcesrequest_equality_with_other_objects():\n constructor_args = dict(\n interface=\"https://schema.skao.int/ska-low-mccs-releaseresources/2.0\",\n subarray_id=1,\n release_all=True,\n )\n request = ReleaseResourcesRequest(**constructor_args)\n\n assert request != 1\n assert request != object()",
"def test_merge_fails_parameters(self):\n p1 = PathFactory.create()\n p2 = PathFactory.create()\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [p1.pk]})\n self.assertEqual({'error': 'You should select two paths'}, response.json())\n\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [p1.pk, p1.pk, p2.pk]})\n self.assertEqual({'error': 'You should select two paths'}, response.json())",
"def test_update_task_docs_not_invalid_url(self):\n task_id = util.MOCK_UUID_5\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\"docs\": \"notAValidUrl\"},\n )\n result = rv.json()\n expected = {\"code\": \"NotValidUrl\", \"message\": \"Input is not a valid URL\"}\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 400)",
"def test_integrate_sources_with_crecord(dclient, admin_user, example_crecord):\n dclient.force_authenticate(user=admin_user)\n docket = os.listdir(\"tests/data/dockets/\")[0]\n with open(f\"tests/data/dockets/{docket}\", \"rb\") as d:\n doc_1 = SourceRecord.objects.create(\n caption=\"Hello v. World\",\n docket_num=\"MC-1234\",\n court=SourceRecord.Courts.CP,\n url=\"https://abc.def\",\n record_type=SourceRecord.RecTypes.DOCKET_PDF,\n file=File(d),\n owner=admin_user,\n )\n summary = os.listdir(\"tests/data/summaries\")[0]\n with open(f\"tests/data/summaries/{summary}\", \"rb\") as s:\n doc_2 = SourceRecord.objects.create(\n caption=\"Hello v. Goodbye\",\n docket_num=\"MC-1235\",\n court=SourceRecord.Courts.MDJ,\n url=\"https://def.ghi\",\n record_type=SourceRecord.RecTypes.SUMMARY_PDF,\n file=File(s),\n owner=admin_user,\n )\n\n # when sent to api, serialized document data won't have a file included.\n # The request is asking to do stuff using the file that is on the server.\n doc_1_data = SourceRecordSerializer(doc_1).data\n # doc_1_data.pop(\"file\")\n\n doc_2_data = SourceRecordSerializer(doc_2).data\n # doc_2_data.pop(\"file\")\n source_records = [doc_1_data, doc_2_data]\n data = {\n \"crecord\": CRecordSerializer(example_crecord).data,\n \"source_records\": source_records,\n }\n\n resp = dclient.put(\"/api/record/cases/\", data=data)\n assert resp.status_code == 200\n assert \"crecord\" in resp.data\n assert \"source_records\" in resp.data\n # the response source_records list might include new source records, so will be at\n # least as long as the original source records list.\n assert len(resp.data[\"source_records\"]) >= len(source_records)\n try:\n\n CRecord.from_dict(resp.data[\"crecord\"])\n except Exception as err:\n pytest.fail(err)",
"def test_conflict(self):\n self._error_test(fitbit_exceptions.HTTPConflict)",
"def test_merge_fails_other_path_intersection_less_than_snapping(self):\n path_a = PathFactory.create(name=\"A\", geom=LineString((0, 0), (10, 0)))\n path_b = PathFactory.create(name=\"B\", geom=LineString((11, 0), (20, 0)))\n PathFactory.create(name=\"C\", geom=LineString((10, 1), (10, 10)))\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [path_a.pk, path_b.pk]})\n json_response = response.json()\n self.assertIn('error', json_response)\n self.assertEqual(json_response['error'], \"You can't merge 2 paths with a 3rd path in the intersection\")",
"def test_wrong_id(self):\n self.request.matchdict = {'user_id': int(self.request.user.id)+4}\n self.request.json_body = {}\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))"
]
| [
"0.60801184",
"0.5607128",
"0.5460178",
"0.53366244",
"0.5295656",
"0.5231881",
"0.51995516",
"0.5198802",
"0.5194811",
"0.51876605",
"0.5168762",
"0.5166662",
"0.51323646",
"0.5114137",
"0.51095927",
"0.510616",
"0.5049082",
"0.5046716",
"0.5040745",
"0.5032963",
"0.5015139",
"0.5012835",
"0.49904034",
"0.49808487",
"0.49504974",
"0.49469674",
"0.4945637",
"0.493387",
"0.48775315",
"0.48731977"
]
| 0.81150794 | 0 |
Create a form for the given model type. | def _form_for_type(request, C, defn, add_id_and_rev=False):
form = build(defn, C, add_id_and_rev=add_id_and_rev,
widget_registry=_widget_registry(request))
form.renderer = request.environ['restish.templating'].renderer
return form | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_form_class(self):\r\n return modelform_factory(self.model)",
"def make_form(self):",
"def get_form_for_model(\n model,\n form_class=WagtailAdminModelForm,\n **kwargs,\n):\n\n # This is really just Django's modelform_factory, tweaked to accept arbitrary kwargs.\n\n meta_class_attrs = kwargs\n meta_class_attrs[\"model\"] = model\n\n # The kwargs passed here are expected to come from EditHandler.get_form_options, which collects\n # them by descending the tree of child edit handlers. If there are no edit handlers that\n # specify form fields, this can legitimately result in both 'fields' and 'exclude' being\n # absent, which ModelForm doesn't normally allow. In this case, explicitly set fields to [].\n if \"fields\" not in meta_class_attrs and \"exclude\" not in meta_class_attrs:\n meta_class_attrs[\"fields\"] = []\n\n # Give this new form class a reasonable name.\n class_name = model.__name__ + \"Form\"\n bases = (form_class.Meta,) if hasattr(form_class, \"Meta\") else ()\n Meta = type(\"Meta\", bases, meta_class_attrs)\n form_class_attrs = {\"Meta\": Meta}\n\n metaclass = type(form_class)\n return metaclass(class_name, (form_class,), form_class_attrs)",
"def create(request, model, decorator = lambda x:x,\r\n post_save_redirect='', template_name=''):\r\n \r\n FormClass = decorator(\r\n forms.form_for_model(\r\n model,\r\n fields = get_allowed_fields(request, model),\r\n ),\r\n request,\r\n )\r\n \r\n template_name = template_name or _make_template_name(model, 'form')\r\n\r\n if request.method == 'POST':\r\n form = FormClass(request.POST)\r\n if form.is_valid():\r\n record = form.save(commit = False)\r\n record.account = request.account\r\n record.created_by = request.person\r\n record.save()\r\n return HttpResponseRedirect(\r\n post_save_redirect or record.get_absolute_url()\r\n )\r\n else:\r\n form = FormClass()\r\n return render_to_response(\r\n template_name,\r\n context_instance = RequestContext(\r\n request,\r\n {'form': form}\r\n )\r\n )",
"def get_form(self, kind, ins = None):\n from aha.widget.form import Form\n from aha.widget.field import TextField, RichText\n from formencode import validators as v\n\n class AddForm(Form):\n multipart = True\n form_title = u'Add New Category'\n button_title = u'Add'\n submit = u'Save'\n name = TextField(title = u'ID', args = {'size':40},\n validator = v.String(), required = True)\n title = TextField(title = u'Title', args = {'size':40},\n validator = v.String(), required = True)\n description = RichText(title = u'Description', args = dict(rows = 4),\n required = False, collapsable = True)\n \n class EditForm(AddForm):\n form_title = u'Edit Category'\n \n del EditForm['name']\n\n if kind == 'add':\n return AddForm()\n elif kind == 'edit':\n return EditForm()",
"def create_model_form(name, fields=''):\n if '/' in name:\n blueprint_name, model_name = name.split('/')\n output_file = 'blueprints/%s/forms.py' % blueprint_name\n else:\n model_name = name\n output_file = 'forms.py'\n file_exists = os.path.exists(output_file)\n field_args = []\n for f in fields.split():\n field_name = f.split(':')[0]\n field_args.append(create_model_form.field_args % dict(field_name=field_name))\n form = create_model_form.form_scaffold % dict(model_name=model_name.capitalize(), field_args=''.join(field_args))\n with open(output_file, 'a') as out_file:\n if not file_exists:\n form = '''%(imports)s\\n%(rest)s''' % dict(imports=create_model_form.imports,\n rest=form)\n out_file.write(form)",
"def show_create_form(self):\n # if there is no add permission then does not show the form\n if not self.has_add_permissions(): return\n\n params = {\n 'title':'Create',\n 'model':self.model,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines':self.INLINES})\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly':self.READ_ONLY})\n\n createform = self.addmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_ADD:\n self._list.hide()\n self._details.show()\n self._details.value = createform\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o):\n getattr(self, o).hide()\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()",
"def breadmodelform_factory(\n request, model, layout, instance=None, baseformclass=forms.models.ModelForm\n):\n formfieldelements = _get_form_fields_from_layout(layout)\n\n class BreadModelFormBase(baseformclass):\n field_order = baseformclass.field_order or [\n f.fieldname for f in formfieldelements\n ]\n\n def __init__(self, data=None, files=None, initial=None, **kwargs):\n inst = kwargs.get(\"instance\", instance)\n formsetinitial = {}\n for name, field in self.declared_fields.items():\n if isinstance(field, FormsetField):\n formsetinitial[name] = {\n \"instance\": inst,\n }\n if isinstance(field, GenericForeignKeyField):\n modelfield = model._meta.get_field(name)\n if hasattr(modelfield, \"lazy_choices\"):\n field.choices = GenericForeignKeyField.objects_to_choices(\n modelfield.lazy_choices(modelfield, request, inst)\n )\n init = getattr(inst, modelfield.name, None)\n if init:\n formsetinitial[name] = GenericForeignKeyField.object_to_choice(\n init\n )[0]\n if initial:\n formsetinitial.update(initial)\n super().__init__(\n data=data,\n files=files,\n initial=formsetinitial,\n **kwargs,\n )\n\n def save(self, *args, **kwargs):\n with transaction.atomic():\n kwargs[\"commit\"] = False\n forminstance = super().save(*args, **kwargs)\n # GenericForeignKey might need a resafe because we set the value\n for fieldname, field in self.fields.items():\n if isinstance(field, GenericForeignKeyField):\n setattr(forminstance, fieldname, self.cleaned_data[fieldname])\n forminstance.save()\n self.save_m2m()\n\n for fieldname, field in self.fields.items():\n if isinstance(field, FormsetField):\n self.cleaned_data[fieldname].instance = forminstance\n self.cleaned_data[fieldname].save()\n return forminstance\n\n # GenericForeignKey and one-to-n fields need to be added separatly to the form class\n attribs = {}\n for formfieldelement in formfieldelements:\n try:\n modelfield = model._meta.get_field(formfieldelement.fieldname)\n except FieldDoesNotExist:\n continue\n if isinstance(modelfield, GenericForeignKey):\n attribs[modelfield.name] = GenericForeignKeyField(\n required=not model._meta.get_field(modelfield.fk_field).blank\n )\n elif modelfield.one_to_many or (\n modelfield.one_to_one and not modelfield.concrete\n ):\n attribs[modelfield.name] = FormsetField(\n _generate_formset_class(\n request, model, modelfield, baseformclass, formfieldelement\n ),\n instance,\n formfieldelement.formsetinitial,\n )\n patched_formclass = type(\n f\"{model.__name__}BreadModelForm\", (BreadModelFormBase,), attribs\n )\n ret = forms.modelform_factory(\n model,\n form=patched_formclass,\n fields=[\n f.fieldname\n for f in formfieldelements\n if isinstance(f, _layout.form.FormField)\n ],\n formfield_callback=lambda field: _formfield_callback_with_request(\n field, request, model\n ),\n )\n return ret",
"def create_form(data, form_class, instance):\n if instance:\n form = form_class(instance=instance)\n if data:\n form = form_class(data, instance=instance)\n else:\n form = form_class()\n if data:\n form = form_class(data)\n return form",
"def get_form_class(self):\n \n \"\"\"\n Construct a form class that has all the fields and formsets named in\n the children of this edit handler. \n \"\"\"\n if not hasattr(self, 'model'):\n raise AttributeError(\n '%s is not bound to a model yet. Use `.bind_to(model=model)` '\n 'before using this method.' % self.__class__.__name__)\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the rai_base_form_class from the model.\n # If that is not defined, use RAIAdminModelForm.\n model_form_class = getattr(self.model, 'rai_base_form_class',\n RAIAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n formsets = self.required_formsets()\n\n form_class = rai_modelform_factory(\n self.decorator.get_rai_model(),\n form_class=base_form_class,\n fields=self.required_internal_fields(),\n formsets=formsets,\n widgets=self.widget_overrides())\n form_class.readonly_fields = self.readonly_fields()\n return form_class",
"def form(self, name, python_type, optional=False, **kwargs):\n return self.simple_param('formData', name, python_type,\n optional=optional, **kwargs)",
"def as_form(cls: Type[BaseModel]):\n new_params = [\n inspect.Parameter(\n field.alias,\n inspect.Parameter.POSITIONAL_ONLY,\n default=(Form(field.default) if not field.required else Form(...)),\n )\n for field in cls.__fields__.values()\n ]\n\n async def _as_form(**data):\n return cls(**data)\n\n sig = inspect.signature(_as_form)\n sig = sig.replace(parameters=new_params)\n _as_form.__signature__ = sig # type: ignore\n setattr(cls, \"as_form\", _as_form)\n return cls",
"def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()",
"def get_form_class(self):\n form_options = self.get_form_options()\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the base_form_class from the model.\n # If that is not defined, use WagtailAdminModelForm.\n model_form_class = getattr(self.model, \"base_form_class\", WagtailAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n return get_form_for_model(\n self.model,\n form_class=base_form_class,\n **form_options,\n )",
"def initialize_model(model_type, **kwargs):\n try:\n model_class = MODEL_DICT[model_type]\n except KeyError:\n raise RuntimeError(f\"Cannot find model class for {model_type}. Pick one of {list(MODEL_DICT.keys())}\")\n\n return model_class(**kwargs)",
"def create_form(self, resource=None, edit_form=False):\n form = UserForm(self.config_models, obj=resource)\n\n form.totp_enabled = self.totp_enabled\n\n session = self.session()\n self.update_form_collection(\n resource, edit_form, form.groups, self.Group, 'sorted_groups',\n 'id', 'name', session\n )\n self.update_form_collection(\n resource, edit_form, form.roles, self.Role, 'sorted_roles', 'id',\n 'name', session\n )\n session.close()\n\n return form",
"def get_book_form_for_create(self, book_record_types):\n # Implemented from template for\n # osid.resource.BinAdminSession.get_bin_form_for_create_template\n if self._catalog_session is not None:\n return self._catalog_session.get_catalog_form_for_create(catalog_record_types=book_record_types)\n for arg in book_record_types:\n if not isinstance(arg, ABCType):\n raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')\n if book_record_types == []:\n result = objects.BookForm(\n runtime=self._runtime,\n effective_agent_id=self.get_effective_agent_id(),\n proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.\n else:\n result = objects.BookForm(\n record_types=book_record_types,\n runtime=self._runtime,\n effective_agent_id=self.get_effective_agent_id(),\n proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.\n self._forms[result.get_id().get_identifier()] = not CREATED\n return result",
"def new(self, *args, **kw):\n\n\t\t\tif len(args) > 0:\n\t\t\t\tkw['id_fase_fk']= args[0] \n\n\t\t\ttmpl_context.widget = self.new_form\n\t\t\tretorno \t\t= dict(value = kw, model = self.model.__name__)\n\t\t\tretorno['fid']\t= args[0]\n\n\t\t\treturn retorno",
"def _construct_form(self, i, **kwargs):\n defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}\n if self.is_bound:\n defaults['data'] = self.data\n defaults['files'] = self.files\n if self.initial:\n try:\n defaults['initial'] = self.initial[i]\n except IndexError:\n pass\n # Allow extra forms to be empty.\n if i >= self.initial_form_count():\n defaults['empty_permitted'] = True\n defaults.update(kwargs)\n form = self.form(self.params[len(self.params) - i - 1][1], self.params[len(self.params) - i - 1][0], i, **defaults) #passando o params[i] para o form[i]\n self.add_fields(form, i)\n return form",
"def model_form_factory(base=Form, meta=ModelFormMeta, **defaults):\n\n class ModelForm(six.with_metaclass(meta, base)):\n \"\"\"\n A function that returns SQLAlchemy session. This should be\n assigned if you wish to use Unique validator. If you are using\n Flask-SQLAlchemy along with WTForms-Alchemy you don't need to\n set this.\n \"\"\"\n get_session = None\n\n class Meta:\n model = None\n\n default = None\n\n #: Whether or not to skip unknown types. If this is set to True,\n #: fields with types that are not present in FormGenerator type map\n #: will be silently excluded from the generated form.\n #:\n #: By default this is set to False, meaning unknown types throw\n #: exceptions when encountered.\n skip_unknown_types = defaults.pop('skip_unknown_types', False)\n\n #: Whether or not to assign all fields as optional, useful when\n #: creating update forms for patch requests\n all_fields_optional = defaults.pop('all_fields_optional', False)\n\n validators = defaults.pop('validators', {})\n\n #: A dict with keys as field names and values as field arguments.\n field_args = defaults.pop('field_args', {})\n\n #: A dict with keys as field names and values as widget options.\n widget_options = defaults.pop('widget_options', {})\n\n #: Whether or not to include only indexed fields.\n only_indexed_fields = defaults.pop('only_indexed_fields', False)\n\n #: Whether or not to include primary keys.\n include_primary_keys = defaults.pop('include_primary_keys', False)\n\n #: Whether or not to include foreign keys. By default this is False\n #: indicating that foreign keys are not included in the generated\n #: form.\n include_foreign_keys = defaults.pop('include_foreign_keys', False)\n\n #: Whether or not to strip string fields\n strip_string_fields = defaults.pop('strip_string_fields', False)\n\n #: Whether or not to include datetime columns that have a default\n #: value. A good example is created_at column which has a default\n #: value of datetime.utcnow.\n include_datetimes_with_default = defaults.pop(\n 'include_datetimes_with_default', False\n )\n\n #: The default validator to be used for not nullable columns. Set\n #: this to `None` if you wish to disable it.\n not_null_validator = defaults.pop(\n 'not_null_validator',\n InputRequired()\n )\n\n #: A dictionary that overrides not null validation on type level.\n #: Keys should be valid SQLAlchemy types and values should be valid\n #: WTForms validators.\n not_null_validator_type_map = defaults.pop(\n 'not_null_validator_type_map',\n ClassMap(\n [(sa.String, [InputRequired(), DataRequired()])]\n )\n )\n\n #: Default email validator\n email_validator = Email\n\n #: Default length validator\n length_validator = Length\n\n #: Default unique validator\n unique_validator = Unique\n\n #: Default number range validator\n number_range_validator = NumberRange\n\n #: Default date range validator\n date_range_validator = DateRange\n\n #: Default time range validator\n time_range_validator = TimeRange\n\n #: Default optional validator\n optional_validator = Optional\n\n #: Which form generator to use. 
Only override this if you have a\n #: valid form generator which you want to use instead of the\n #: default one.\n form_generator = defaults.pop(\n 'form_generator', FormGenerator\n )\n\n #: Default date format\n date_format = defaults.pop('date_format', '%Y-%m-%d')\n\n #: Default datetime format\n datetime_format = defaults.pop(\n 'datetime_format', '%Y-%m-%d %H:%M:%S'\n )\n\n #: Dictionary of SQLAlchemy types as keys and WTForms field classes\n #: as values. The key value pairs of this dictionary override\n #: the key value pairs of FormGenerator.TYPE_MAP.\n #:\n #: Using this configuration option one can easily configure the\n #: type conversion in class level.\n type_map = defaults.pop('type_map', ClassMap())\n\n #: Whether or not to raise InvalidAttributExceptions when invalid\n #: attribute names are given for include / exclude or only\n attr_errors = defaults.pop('attr_errors', True)\n\n #: Additional fields to include in the generated form.\n include = defaults.pop('include', [])\n\n #: List of fields to exclude from the generated form.\n exclude = defaults.pop('exclude', [])\n\n #: List of fields to only include in the generated form.\n only = defaults.pop('only', [])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets object as form attribute.\"\"\"\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if defaults:\n raise UnknownConfigurationOption(\n list(defaults.keys())[0]\n )\n\n return ModelForm",
"def get_form_class():\n return RazorPaymentForm",
"def get_family_form_for_create(self, family_record_types):\n # Implemented from template for\n # osid.resource.BinAdminSession.get_bin_form_for_create_template\n if self._catalog_session is not None:\n return self._catalog_session.get_catalog_form_for_create(catalog_record_types=family_record_types)\n for arg in family_record_types:\n if not isinstance(arg, ABCType):\n raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')\n if family_record_types == []:\n result = objects.FamilyForm(\n runtime=self._runtime,\n effective_agent_id=self.get_effective_agent_id(),\n proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.\n else:\n result = objects.FamilyForm(\n record_types=family_record_types,\n runtime=self._runtime,\n effective_agent_id=self.get_effective_agent_id(),\n proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.\n self._forms[result.get_id().get_identifier()] = not CREATED\n return result",
"def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)",
"def create_model(self):\n pass",
"def create_model(self):\n pass",
"def get_form(self, form_class):\n return form_class(**self.get_form_kwargs())",
"def get_form(cls, model_name):\n return cls.relations[model_name]",
"def create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model"
]
| [
"0.6819877",
"0.6556186",
"0.64142656",
"0.6294211",
"0.6244579",
"0.61463284",
"0.60895634",
"0.5927193",
"0.5925178",
"0.5921908",
"0.5921233",
"0.5910685",
"0.5899504",
"0.58937675",
"0.5880354",
"0.58687985",
"0.5854467",
"0.58382326",
"0.58021164",
"0.57389766",
"0.5721072",
"0.5706295",
"0.5701623",
"0.5700436",
"0.5700436",
"0.56845105",
"0.56793475",
"0.56486326",
"0.5625329",
"0.5625329"
]
| 0.7316288 | 0 |
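
The `_form_for_type` document above is only a thin factory; a hedged sketch of how it might be called from a request handler follows. Everything except the factory itself — `Artist`, `artist_defn`, and the `defaults` attribute — is an illustrative assumption, not taken from the record.

```python
# Hypothetical caller for _form_for_type (names invented for illustration).
def artist_edit_form(request, doc):
    # Ask for _id/_rev fields so an existing CouchDB document can be
    # round-tripped through the form without losing its revision.
    form = _form_for_type(request, Artist, artist_defn, add_id_and_rev=True)
    form.defaults = doc  # assumed formish-style API: seed fields from the doc
    return form
```
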
Create a new doc from the model type and form data. | def _doc_create(type, data):
doc = dict(data)
doc.update({'model_type': type})
return doc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n issue_editor=request.form['issue_editor'],\n issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')",
"async def create_doc(self, *args, **kwargs):\n pass",
"def create_document(self):\n # set single values\n if len(self.field_values) > 0:\n self._set_field_values()\n\n # set multi values\n if len(self.block_field_values) > 0:\n self._set_multi_field_values()\n\n self.field_values = {}\n self.block_field_values = {}\n\n self.client.service.CreateDocument()",
"def create(self, request, *args, **kwargs):\n logger.debug(u'DocumentDefinition.create ...')\n logger.debug(u'DocumentDefinition.create :: REQUEST: {}'.format(request.REQUEST))\n version = request.version\n if '@' in version:\n branch_name, tag_name = version.split('@')\n else:\n tag_name = version\n branch_name = None\n logger.debug(u'DocumentDefinition.create :: tag: {}'.format(tag_name))\n now_es = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n if len(kwargs) == 0:\n raise exceptions.XimpiaAPIException(_(u'No document type sent'))\n doc_type = kwargs['doc_type']\n logger.debug(u'DocumentDefinition.create :: doc_type: {}'.format(doc_type))\n # resolve index based on request host for site\n site_slug = get_site(request)\n index = '{}__base'.format(site_slug)\n logger.debug(u'DocumentDefinition.create :: index: {}'.format(index))\n ###############\n # validations\n ###############\n # check user request and user is admin\n if not request.user or (request.user and not request.user.id):\n raise exceptions.XimpiaAPIException(_(u'User needs to be authenticated'))\n user = request.user\n logger.debug(u'DocumentDefinition.create :: request.user: {}'.format(user))\n groups = user.document['groups']\n logger.debug(u'DocumentDefinition.create :: groups: {}'.format(groups))\n admin_groups = filter(lambda x: x['name'] == 'admin', groups)\n if not admin_groups:\n raise exceptions.XimpiaAPIException(_(u'User needs to be admin'))\n # generate mappings\n doc_def = DocumentDefinition(json.loads(request.body), doc_type, user, tag_name=tag_name,\n branch_name=branch_name)\n document_definition_input = doc_def.logical\n logger.info(u'DocumentDefinition.create :: document_definition_input: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(document_definition_input)))\n bulk_queries = list()\n # Check db validations: tag exists, document definition not exists, no fields\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'document-definition'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'document-definition__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # meta_data = document_definition_input['_meta']\n # Check mapping does not exist\n es_response_raw = requests.get(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n )\n )\n existing_mapping = es_response_raw.json()\n if existing_mapping:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists :: {}'.format(\n existing_mapping\n )))\n # Check no fields for doc type\n logger.debug(u'DocumentDefinition.create :: mapping in ES: {}'.format(es_response_raw.content))\n\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'field-version'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'field-version__doc_type__v1.raw__v1': doc_type\n }\n }\n }\n )\n )\n )\n # Validate tag exists\n bulk_queries.append(\n (json.dumps(\n {\n 'index': index,\n 'type': 'tag'\n }\n ), json.dumps(\n {\n 'query': {\n 'match_all': {}\n },\n 'filter': {\n 'term': {\n 'tag__slug__v1.raw__v1': slugify(tag_name)\n }\n }\n }\n )\n )\n )\n # print ''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n es_response_raw = requests.get(\n '{host}/_msearch'.format(\n host=settings.ELASTIC_SEARCH_HOST\n ),\n data=''.join(map(lambda x: '{}\\n'.format(x[0]) + '{}\\n'.format(x[1]), bulk_queries))\n )\n es_response = es_response_raw.json()\n 
logger.info(u'DocumentDefinition.create :: response validations: {}'.format(\n es_response\n ))\n responses = es_response.get('responses', [])\n if responses[0]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[1]['hits']['total'] > 0:\n raise exceptions.XimpiaAPIException(_(u'Document definition already exists'))\n if responses[2]['hits']['total'] == 0:\n raise exceptions.XimpiaAPIException(_(u'Tag does not exist'))\n ##################\n # End validations\n ##################\n\n # Build data\n doc_mapping = doc_def.get_mappings()\n fields_version_str = doc_def.get_field_versions(index, user)\n # Create document definition document\n physical = doc_def.get_physical()\n logger.debug(u'_create_index :: document definition: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(physical))\n )\n es_response_raw = requests.post(\n '{host}/{index}/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=u'{}__document-definition'.format(index),\n doc_type='document-definition'\n ),\n data=json.dumps(\n physical\n )\n )\n es_response = es_response_raw.json()\n document_created = es_response\n logger.info(u'DocumentDefinition.create :: response create document definition: {}'.format(\n es_response\n ))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error creating document definition')\n # Bulk insert for all fields\n # print fields_version_str\n es_response_raw = requests.post(\n '{host}/_bulk'.format(host=settings.ELASTIC_SEARCH_HOST),\n data=fields_version_str,\n headers={'Content-Type': 'application/octet-stream'},\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response create field versions: {}'.format(\n es_response\n ))\n if 'errors' in es_response and es_response['errors']:\n raise exceptions.XimpiaAPIException(u'Error creating fields')\n # Create mapping\n logger.debug(u'DocumentDefinition.create :: mappings: {}'.format(\n pprint.PrettyPrinter(indent=4).pformat(doc_mapping)\n ))\n es_response_raw = requests.put(\n '{host}/{index}/_mapping/{doc_type}'.format(\n host=settings.ELASTIC_SEARCH_HOST,\n index=index,\n doc_type=doc_type\n ),\n data=json.dumps(doc_mapping)\n )\n es_response = es_response_raw.json()\n logger.info(u'DocumentDefinition.create :: response put mapping: {}'.format(es_response))\n if 'error' in es_response and es_response['error']:\n raise exceptions.XimpiaAPIException(u'Error in saving mappings')\n # output document\n output_document = json.loads(request.body)\n output_document['_id'] = document_created['_id']\n output_document['_version'] = document_created['_version']\n return Response(output_document)",
"def new_document(klass, name=None, author=None):\n doc = Factory.new_document(klass, author)\n doc.name = name\n doc._osl.id = uuid.uuid4()\n return doc",
"def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()",
"def create(init_document: 'Document') -> 'DocumentArray':",
"def add_document():\n\n user = User(root_uri=os.environ['ROOT_BACKEND_URI'])\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n if request.method == 'POST':\n creators_ids = request.form.getlist('choose_creators') # if there is no such name, returns empty list\n controllers_ids = request.form.getlist('choose_controllers')\n\n request_form = dict(request.form)\n request_form.pop('choose_creators') # there is no need in it now\n request_form.pop('choose_controllers')\n\n request_form['creators_ids'] = creators_ids\n request_form['controllers_ids'] = controllers_ids\n\n request_form['date_of_creation'] = datetime.strptime(request_form['date_of_creation'],\n '%Y-%m-%d')\n request_form['date_of_registration'] = datetime.strptime(request_form['date_of_registration'],\n '%Y-%m-%d')\n\n add_new_document_schema = AddNewDocument()\n errors = add_new_document_schema.validate(data=request_form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_document_schema.dump(request_form)\n\n document = Document(root_uri=os.environ['ROOT_BACKEND_URI'])\n document.add_document(\n document_name=args['document_name'],\n document_type=args['document_type'],\n date_of_creation=args['date_of_creation'],\n date_of_registration=args['date_of_registration'],\n controllers_ids=args['controllers_ids'],\n creators_ids=args['creators_ids'],\n )\n\n return redirect(url_for('show_documentation.show_documents'))\n\n return render_template('pages/inputs/add_document.html', **context)",
"def add_document():\n\n user = User(connection=connection, cursor=cursor)\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n if request.method == 'POST':\n creators_ids = request.form.getlist('choose_creators') # if there is no such name, returns empty list\n controllers_ids = request.form.getlist('choose_controllers')\n\n request_form = dict(request.form)\n request_form.pop('choose_creators') # there is no need in it now\n request_form.pop('choose_controllers')\n\n request_form['creators_ids'] = creators_ids\n request_form['controllers_ids'] = controllers_ids\n\n request_form['date_of_creation'] = datetime.strptime(request_form['date_of_creation'],\n '%Y-%m-%d')\n request_form['date_of_registration'] = datetime.strptime(request_form['date_of_registration'],\n '%Y-%m-%d')\n\n add_new_document_schema = AddNewDocument()\n errors = add_new_document_schema.validate(data=request_form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_document_schema.dump(request_form)\n\n document = Document(connection=connection, cursor=cursor)\n document.add_document(\n document_name=args['document_name'],\n document_type=args['document_type'],\n date_of_creation=args['date_of_creation'],\n date_of_registration=args['date_of_registration'],\n controllers_ids=args['controllers_ids'],\n creators_ids=args['creators_ids'],\n )\n\n return redirect(url_for('documentation.show_documents'))\n\n return render_template('pages/inputs/add_document.html', **context)",
"def save( self, request, idx ) :\n\n if idx != 'None' :\n obj = models.Document.objects.get( id = idx )\n obj.element = self.cleaned_data['element']\n obj.type = self.cleaned_data['type']\n obj.name = self.cleaned_data['name']\n\n else :\n obj = models.Document.objects.get_or_create(element = self.cleaned_data['element'],\n type = self.cleaned_data['type'],\n name = self.cleaned_data['name'],\n author = request.user )[0]\n\n obj.link = self.cleaned_data['link']\n obj.save()\n\n return obj",
"def build_document(self, labels_from_json):\n if not len(self.raw_labels):\n self.get_labels(labels_from_json)\n raw_text = self.instance_input_file.read()\n document = self.DOCUMENT_CLASS(self.identifier, title=self.identifier)\n document.build_from_text(raw_text, start_index=0)\n for start_index, end_index in self.raw_labels:\n document.add_label_for_position(\n 'claim', int(start_index), int(end_index))\n return document",
"def _create_document(result_dict):\n document = Document(\n name=result_dict['docname'],\n original_id=result_dict['itemid'],\n doctype=result_dict['doctype'],\n language=result_dict['languageisocode'],\n conclusion=result_dict['conclusion'],\n originatingbody=result_dict['originatingbody'],\n application=result_dict['application'],\n )\n return document",
"def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass",
"def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2",
"def build_document(self):\n pass",
"def create_from_dict(new_info: dict):\n doc = Db2Document(mhr_number=new_info.get('mhrNumber'),\n document_type=new_info.get('documentType'),\n document_reg_id=new_info.get('documentRegistrationId'),\n interimed=new_info.get('interimed', ''),\n owner_cross_reference=new_info.get('ownerCrossReference', ''),\n interest_denominator=new_info.get('interestDenominator', 0),\n declared_value=new_info.get('declaredValue', 0),\n own_land=new_info.get('ownLand', ''),\n routing_slip_number=new_info.get('routingSlipNumber', ''))\n doc.last_service = new_info.get('lastService', '')\n doc.bcol_account = new_info.get('bcolAccount', '')\n doc.dat_number = new_info.get('datNumber', '')\n doc.examiner_id = new_info.get('examinerId', '')\n doc.update_id = new_info.get('updateId', '')\n doc.phone_number = new_info.get('phoneNumber', '')\n doc.attention_reference = new_info.get('attentionReference', '')\n doc.name = new_info.get('name', '')\n doc.legacy_address = new_info.get('legacyAddress', '')\n doc.number_of_pages = new_info.get('numberOfPages', 0)\n doc.consideration_value = new_info.get('considerationValue', '')\n doc.affirm_by_name = new_info.get('affirmByName', '')\n doc.liens_with_consent = new_info.get('liensWithConsent', '')\n doc.client_reference_id = new_info.get('clientReferenceId', '')\n if new_info.get('createDateTime', None):\n doc.registration_ts = model_utils.ts_from_iso_format(new_info.get('createDateTime'))\n if new_info.get('draftDateTime', None):\n doc.draft_ts = model_utils.ts_from_iso_format(new_info.get('draftDateTime'))\n if new_info.get('transferExecutionDate', None):\n date_val: str = str(new_info.get('transferExecutionDate'))[0:10]\n doc.transfer_execution_date = model_utils.date_from_iso_format(date_val)\n return doc",
"def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()",
"def create_document(document: DocumentIn, db: Session = Depends(get_db)):\n return add_document(db, document)",
"def create(self, validated_data):",
"def create_new_doc(self, doc: Doc) -> Doc:\n\n return spacy.tokens.Doc(self.model.vocab, [tok.text for tok in doc], #type: ignore\n [tok.whitespace_ for tok in doc])",
"def get_model(self):\n return Doc()",
"def test_client_document_create(self):\n pass",
"def post(self):\r\n data = request.form\r\n return create(data=data)",
"def post_model_data(request):\n modelname = request.matchdict['modelname']\n data_doc = {\n 'type': 'data',\n 'model': modelname,\n 'data': json.loads(request.body)\n }\n _id, rev = request.db.save(data_doc)\n return {'id': _id}",
"def test_create_labelled_document(self):\n model_name = TestSingleLabelClassifierModel.get_name()\n\n document = Document.objects.create()\n self.assertFalse(LabelledDocument.objects.exists())\n\n url = reverse('django_learnit:document-labelling', kwargs={\n 'name': model_name,\n 'pk': document.pk\n })\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n form = response.context['form']\n self.assertIsInstance(form, SingleLabelClassifierForm)\n\n data = form.initial\n data['label'] = '1'\n\n self.client.post(url, data)\n\n labelled_document = LabelledDocument.objects.get_for_document(\n document, model_name)\n\n expected_value = LabelledDocument.serialize_value({\n 'label': '1'\n })\n\n self.assertEqual(labelled_document.value, expected_value)",
"def saveDocument(self, doc, REQUEST, creation=False):\n method = 'saveDocument'\n\n db = self.db\n form_name = REQUEST.get('Form') or doc.getItem('Form')\n\n form = db.getForm(form_name)\n\n errors=form.validateInputs(REQUEST, doc=doc)\n if len(errors)>0:\n for err in errors:\n self.set_error(method, err)\n else:\n doc.setItem('Form', form.getFormName())\n\n # process editable fields (we read the submitted value in the request)\n form.readInputs(doc, REQUEST, process_attachments=True)\n\n # refresh computed values, run onSave, reindex\n self.save(doc, form=form, creation=creation)",
"def create_new_model(\n self, content: Optional[Dict[str, Any]], model_name: str, columns_sql: Sequence[str]\n ) -> Dict[str, Any]:\n logger.info(f\"The model '{model_name}' has not been documented yet. Creating a new entry.\")\n columns = []\n for column_sql in columns_sql:\n description = self.get_column_description_from_dbt_definitions(column_sql)\n columns.append({\"name\": column_sql, \"description\": description})\n model = {\n \"name\": model_name,\n \"description\": MODEL_NOT_DOCUMENTED,\n \"columns\": columns,\n }\n if not content:\n content = {\"version\": 2, \"models\": [model]}\n else:\n content[\"models\"].append(model)\n return content",
"def create_from_registration(registration, reg_json, doc_type: str, local_ts):\n doc_id = reg_json.get('documentId', '')\n doc = Db2Document(id=doc_id,\n mhr_number=registration.mhr_number,\n document_type=doc_type,\n document_reg_id=registration.doc_reg_number,\n registration_ts=local_ts,\n draft_ts=local_ts,\n interimed='',\n owner_cross_reference='',\n interest_denominator=0,\n declared_value=reg_json.get('declaredValue', 0),\n routing_slip_number='')\n doc.last_service = ''\n doc.bcol_account = ''\n doc.dat_number = ''\n doc.examiner_id = ''\n doc.update_id = ''\n doc.number_of_pages = 0\n doc.consideration_value = reg_json.get('consideration', '')\n doc.affirm_by_name = ''\n if reg_json.get('affirmByName'):\n doc.affirm_by_name = str(reg_json.get('affirmByName')).upper()[0:40]\n doc.liens_with_consent = ''\n if reg_json.get('submittingParty'):\n submitting = reg_json.get('submittingParty')\n if submitting.get('phoneNumber'):\n doc.phone_number = str(submitting.get('phoneNumber'))[0:10]\n else:\n doc.phone_number = ''\n doc.name = Db2Document.to_db2_submitting_name(submitting)\n doc.legacy_address = address_utils.to_db2_address(submitting.get('address'))\n else:\n doc.phone_number = ''\n doc.name = ''\n doc.legacy_address = ''\n if reg_json.get('attentionReference'):\n doc.attention_reference = str(reg_json['attentionReference'])[0:40]\n else:\n doc.attention_reference = ''\n if registration.client_reference_id:\n doc.client_reference_id = registration.client_reference_id[0:30]\n else:\n doc.client_reference_id = ''\n if doc_type in (Db2Document.DocumentTypes.TRANS,\n Db2Document.DocumentTypes.TRAND,\n Db2Document.DocumentTypes.TRANS_ADMIN,\n Db2Document.DocumentTypes.TRANS_AFFIDAVIT,\n Db2Document.DocumentTypes.TRANS_WILL):\n if reg_json.get('transferDate'):\n doc.transfer_execution_date = model_utils.date_from_iso_format(str(reg_json['transferDate'])[0:10])\n else:\n doc.transfer_execution_date = local_ts.date()\n else:\n doc.transfer_execution_date = model_utils.date_from_iso_format('0001-01-01')\n if reg_json.get('ownLand'):\n doc.own_land = 'Y'\n else:\n doc.own_land = 'N'\n return doc",
"def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass",
"def create_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_token(request.db)[modelname]\n tokens = [t.value for t in results]\n if len(tokens) > 0:\n token = tokens[0]\n if token != request.GET.get('token'):\n # provided token does not match\n request.errors.add('query', 'token',\n 'invalid token for model %s' % modelname)\n request.errors.status = 403\n return json_error(request.errors)\n else:\n # Generate a unique token\n token = os.urandom(8).encode('hex')\n token_doc = {'type': 'token', 'token': token, 'model': modelname}\n request.db.save(token_doc)\n\n model_doc = {\n 'type': 'definition',\n 'model': modelname,\n 'definition': json.loads(request.body)\n }\n request.db.save(model_doc) # save to couchdb\n return {'token': token}"
]
| [
"0.7059826",
"0.68696034",
"0.67143714",
"0.65898913",
"0.6445171",
"0.6390132",
"0.63503194",
"0.62521714",
"0.6249602",
"0.6166081",
"0.61429954",
"0.61121595",
"0.6060808",
"0.605228",
"0.60333145",
"0.59959775",
"0.59767336",
"0.5920667",
"0.5892497",
"0.58733225",
"0.5859454",
"0.5841781",
"0.5837815",
"0.580354",
"0.57536626",
"0.5736235",
"0.5735984",
"0.57158375",
"0.57088363",
"0.56916153"
]
| 0.80970454 | 0 |
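
`_doc_create` above is small enough to check by hand: it copies the incoming form data and stamps a `model_type` key onto the copy. A minimal sketch with invented values:

```python
data = {"title": "Kind of Blue", "artist": "Miles Davis"}
doc = _doc_create("album", data)

# The input dict is copied (dict(data)), so the caller's data is not mutated,
# and the copy is tagged with its model type.
assert doc == {"title": "Kind of Blue", "artist": "Miles Davis", "model_type": "album"}
assert "model_type" not in data
```
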
Retrieve the adminish config from the request. | def _config(request):
return request.environ['adminish'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(req):\n #try:\n # user_id = req.user\n #except KeyError as e:\n # msg = req.get_error_msg(e)\n # return send_error_response(msg)\n try:\n config = tools_config_get_config(req)\n except Exception:\n raise http_exc.HTTPClientError()\n else:\n return Response(json_body=json.dumps(config), content_type='application/json')",
"def _get_lsp_config_admin_up(self):\n return self.__lsp_config_admin_up",
"def get_info_admin(self):\n return self.get_info(\"HS_ADMIN\")",
"def administrator_configuration(self) -> Optional['outputs.AdministratorConfigurationResponse']:\n return pulumi.get(self, \"administrator_configuration\")",
"def administrator_configuration(self) -> Optional[pulumi.Input['AdministratorConfigurationArgs']]:\n return pulumi.get(self, \"administrator_configuration\")",
"def administrator_configuration(self) -> Optional[pulumi.Input['AdministratorConfigurationArgs']]:\n return pulumi.get(self, \"administrator_configuration\")",
"def get_configuration():\r\n if not hasattr(CURRENT_REQUEST_CONFIGURATION, 'data'):\r\n return {}\r\n\r\n return CURRENT_REQUEST_CONFIGURATION.data",
"def config(self, request):\n config = OtterConfig(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return config.app.resource()",
"def getAdmin():",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
]
| [
"0.6695683",
"0.65137017",
"0.64466333",
"0.6323818",
"0.62771636",
"0.62771636",
"0.620728",
"0.6170023",
"0.6149542",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767"
]
| 0.79236895 | 0 |
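
`_config` above just reads a value that earlier setup code is expected to have placed into the WSGI environ under the `'adminish'` key. A self-contained sketch with a stand-in request object (the real one would be a restish/WebOb-style request):

```python
class FakeRequest:
    """Stand-in for a request whose .environ is the WSGI environ dict."""
    def __init__(self, environ):
        self.environ = environ

req = FakeRequest({"adminish": {"widget_registry_factory": None}})
assert _config(req) == {"widget_registry_factory": None}
```
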
Create a widget registry from the config, defaulting to couchish's default. | def _widget_registry(request):
factory = _config(request).get('widget_registry_factory') or WidgetRegistry
return factory(_store(request)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register(widget):\n w = widget.class_traits()\n _registry.register(w['_model_module'].default_value,\n w['_model_module_version'].default_value,\n w['_model_name'].default_value,\n w['_view_module'].default_value,\n w['_view_module_version'].default_value,\n w['_view_name'].default_value,\n widget)\n return widget",
"def widget_load_config(self, plugman):\r\n pass",
"def build_from_config(\n config, registry, default_args=None, match_object_args=False\n):\n if config is None:\n return None\n\n assert isinstance(config, dict) and \"name\" in config\n assert isinstance(default_args, dict) or default_args is None\n\n name = config[\"name\"]\n name = name.replace(\"-\", \"_\")\n obj = registry.get(name)\n if obj is None:\n raise KeyError(f\"{name} is not in the {registry.name} registry\")\n\n print(f\"[Loaded {name} path] {inspect.getfile(obj)}\")\n\n args = dict()\n if \"params\" in config:\n args.update(config[\"params\"])\n if default_args is not None:\n args.update(default_args)\n\n if match_object_args:\n if inspect.isclass(obj):\n obj_args = inspect.getfullargspec(obj.__init__).args\n else:\n obj_args = inspect.getfullargspec(obj).args\n valid_args = set(args.keys()) & set(obj_args)\n invalid_args = set(args.keys()) - set(obj_args)\n args = {k: v for k, v in args.items() if k in valid_args}\n if len(invalid_args):\n print(f\"[Ignore args] {invalid_args}\")\n\n if (name in kvt.registry.METRICS._obj_dict.keys()) and (\n inspect.isfunction(obj)\n ):\n o = functools.partial(obj, **args)\n else:\n o = obj(**args)\n\n return o",
"def from_config(cls, xknx, name, config):\n group_address = config.get(\"group_address\")\n scene_number = int(config.get(\"scene_number\"))\n return cls(\n xknx, name=name, group_address=group_address, scene_number=scene_number\n )",
"def test_widgets_registry(self):\n from pages import widgets_registry as wreg\n for widget in wreg.registry:\n w = widget()\n w.render('name', 'value')\n\n try:\n wreg.register_widget(wreg.registry[0])\n raise AssertionError(\"Error not raised properly.\")\n except wreg.WidgetAlreadyRegistered:\n pass\n\n try:\n wreg.get_widget('wrong')\n raise AssertionError(\"Error not raised properly.\")\n except wreg.WidgetNotFound:\n pass",
"def get_registry(self, app):\n # if no registry key is defined in the configuration, return\n if not app.containers.registry:\n return None\n\n # cache the registry object so that it is not recreated\n # during each image pull, which would re-execute a docker login\n # for each pull (it takes 3-5 seconds)\n if not self.registry:\n # Work out what registry plugin to use\n plugin_name, registry_data = app.containers.registry.split(\":\", 1)\n # Call the plugin to log in/etc to the registry\n registry_plugins = app.get_catalog_items(\"registry\")\n if plugin_name == \"plain\":\n # The \"plain\" plugin is a shortcut for \"no plugin\"\n self.registry = BasicRegistryHandler(app, registry_data)\n elif plugin_name in registry_plugins:\n self.registry = registry_plugins[plugin_name](app, registry_data)\n else:\n raise BadConfigError(\"No registry plugin for {} loaded\".format(plugin_name))\n\n return self.registry",
"def _from_config(cls, config, **kwargs):\n return cls(config, **kwargs)",
"def from_config(config: dict):\n pass",
"def config():\n return Config()",
"def config():\n return Config()",
"def __init__(self, **configs): \n self.pkg = \"sdpp_explore\"\n\n self.__dict__.update(configs)",
"def __init__(self):\n self.registry = {}",
"def widget_from_setting(\n recorder: dict,\n key: str,\n group: str,\n element: str,\n override: Union[float, None] = None,\n indent: bool = False,\n) -> dict:\n\n _ = group[element]\n\n if key not in recorder:\n recorder[key] = {}\n\n if \"description\" in _:\n tooltip = _[\"description\"]\n else:\n tooltip = \"\"\n\n value = _[\"default\"]\n\n if override:\n value = override\n\n if indent:\n c1, c2 = st.columns((1, 8))\n else:\n c2 = st\n\n if _[\"type\"] == \"doublespinbox\":\n recorder[key][element] = c2.slider(\n element,\n min_value=float(_[\"min\"]),\n max_value=float(_[\"max\"]),\n value=float(value),\n help=tooltip,\n )\n elif _[\"type\"] == \"spinbox\":\n recorder[key][element] = c2.slider(\n element, min_value=_[\"min\"], max_value=_[\"max\"], value=value, help=tooltip\n )\n elif _[\"type\"] == \"checkbox\":\n recorder[key][element] = c2.checkbox(element, value=value, help=tooltip)\n elif _[\"type\"] == \"checkgroup\":\n opts = list(_[\"value\"].keys())\n recorder[key][element] = c2.multiselect(\n label=element, options=opts, default=value, help=tooltip\n )\n elif _[\"type\"] == \"combobox\":\n recorder[key][element] = c2.selectbox(\n label=element, options=_[\"value\"], index=_[\"value\"].index(value), help=tooltip\n )\n elif _[\"type\"] == \"string\":\n recorder[key][element] = c2.text_input(label=element, default=value, help=tooltip)\n else:\n st.write(f\"Not understood {_}\")\n\n return recorder",
"def __init__(self, config):\n backends = {}\n for k in [x for x in config.keys() if x != 'DEFAULT']:\n backends[k] = config[k]['backend']\n\n # Create base WebNip object\n self.webnip = WebNip(\n modules_path=config['DEFAULT']['modules_path']\n )\n\n # Create backends\n self.backends = []\n for name, module in backends.items():\n try:\n LOGGER.info('Building backend for %s...', name)\n self.backends.append(\n self.webnip.load_backend(\n module,\n name,\n params={\n # Get params, calling the subcommands if necessary\n k: eventually_call_command(v)\n for k, v in config[name].items()\n }\n )\n )\n except Exception as exc:\n LOGGER.error(\n 'An error occured while building backend %s: %s',\n name,\n str(exc)\n )\n if DEBUG:\n raise",
"def _build_data_connector_from_config(\n self,\n name: str,\n config: Dict[str, Any],\n ) -> DataConnector:\n new_data_connector: DataConnector = instantiate_class_from_config(\n config=config,\n runtime_environment={\n \"name\": name,\n \"datasource_name\": self.name,\n \"execution_engine\": self.execution_engine,\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource.data_connector\"\n },\n )\n new_data_connector.data_context_root_directory = (\n self._data_context_root_directory # type: ignore[assignment]\n )\n\n self.data_connectors[name] = new_data_connector\n return new_data_connector",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def create_widgets( self ):",
"def build_reg_from_config(model, reg_config):\n reg_class = reg_config['class']\n reg_args = {k: v for k, v in reg_config.items() if k != 'class'}\n reg = globals()[reg_class](model, **reg_args)\n return reg",
"def from_config(cls, config):\n return cls(**config)",
"def from_config(cls, config):\n return cls(**config)",
"def create_widgets(self):",
"def registry (root, access=Registry.DEFAULT_ACCESS, accept_value=True):\n if root is None:\n return None\n elif isinstance (root, Registry):\n return root\n elif isinstance (root, basestring):\n return Registry.from_string (root, access=access, accept_value=accept_value)\n else:\n raise x_registry (errctx=u\"registry\", errmsg=u\"root must be None, an existing key or a moniker\")",
"def config( **kwargs ):",
"def buildWidgetCollection(self, widgetDom):\n widgetNodes = widgetDom.getElementsByTagName('widgets')[0].childNodes\n for widget in widgetNodes:\n if (widget.nodeType == Node.ELEMENT_NODE):\n #Create a new widget of the type as specified in the xml file \n newWidget = Widget()\n newWidget.type = widget.attributes['type'].value\n parameterList = widget.getElementsByTagName('parameter')\n #Look into the parameter list and build parameter dictionary\n for parameter in parameterList:\n if parameter.attributes != None:\n name = parameter.attributes['name'].value\n value = parameter.attributes['value'].value\n newWidget.parameters[name] = value\n self.widgetCollection.append(newWidget)",
"def widget(self, widget_id):\r\n return resources.Widget(self, widget_id)",
"def from_config(cls, xknx, name, config):\n group_address = \\\n config.get('group_address')\n group_address_state = \\\n config.get('group_address_state')\n\n return cls(xknx,\n name,\n group_address=group_address,\n group_address_state=group_address_state)"
]
| [
"0.5875929",
"0.5706359",
"0.5351235",
"0.5184889",
"0.51765466",
"0.5141085",
"0.51340145",
"0.5107969",
"0.51062024",
"0.51062024",
"0.50923616",
"0.5079311",
"0.5072025",
"0.5058826",
"0.5034717",
"0.5029233",
"0.5029233",
"0.5029233",
"0.5029233",
"0.5029233",
"0.50091606",
"0.50085294",
"0.50082815",
"0.50082815",
"0.49809626",
"0.4978403",
"0.49498802",
"0.48850363",
"0.4872757",
"0.48635146"
]
| 0.71795803 | 0 |
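
Note the `or` fallback in `_widget_registry` above: both a missing `'widget_registry_factory'` key and an explicitly falsy value (e.g. `None`) select couchish's default `WidgetRegistry`. A sketch, where `CustomRegistry` is a hypothetical factory and `_store(request)` is assumed to return the couchish store for this request:

```python
# With a factory configured, it receives the couchish store:
request.environ["adminish"] = {"widget_registry_factory": CustomRegistry}
registry = _widget_registry(request)  # -> CustomRegistry(_store(request))

# With no factory configured (or a falsy one), the default is used:
request.environ["adminish"] = {}
registry = _widget_registry(request)  # -> WidgetRegistry(_store(request))
```
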